| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# -*-coding:utf-8-*-
# Standard library
import time
import sys
import codecs
import os
import json
import io
# Third-party libraries
import requests
from huzhifeng import dumpObj, hasKeys
from openpyxl import load_workbook
from openpyxl import Workbook
# Set default encoding to utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
# requests.packages.urllib3.disable_warnings()
# Global variables
PWD = sys.path[0]
SAVE_PATH = PWD + '/result/'
RET_OK = 0
RET_ERR = -1
MAX_TRIES = 3
MAX_DAYS = 60
stationNameCodeMap = {}
stationCodeNameMap = {}
startTimeMap = {}
arriveTimeMap = {}
EXCEL_SHEET_NUMBER = 24
cityList = []
START_EXCEL_NAME = 'start.xlsx'
ARRIVE_EXCEL_NAME = 'arrive.xlsx'
SUM_EXCEL_NAME = 'sum.xlsx'
startWB = Workbook()
arriveWB = Workbook()
sumWB = Workbook()
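# queryTickets: fetch the remaining-ticket listing from the 12306 leftTicket API for one
# travel date and one origin/destination station-code pair, retrying on network errors;
# if the retry also fails, progress is saved to config.json and the workbooks before the
# whole run is restarted via start().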
def queryTickets(queryDate, from_station_code, to_station_code):
time.sleep(1)
parameters = [
('leftTicketDTO.train_date', queryDate),
('leftTicketDTO.from_station', from_station_code),
('leftTicketDTO.to_station', to_station_code),
('purpose_codes', "ADULT"),
]
headers = {
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-TW;q=0.2',
'Cache-Control':'no-cache',
#'Host': 'kyfw.12306.cn',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
}
try:
r = requests.get('https://kyfw.12306.cn/otn/leftTicket/queryA', params=parameters, headers=headers,
timeout=10, verify=False)
while r.url == "http://www.12306.cn/mormhweb/logFiles/error.html":
r = requests.get('https://kyfw.12306.cn/otn/leftTicket/queryA', params=parameters, headers=headers,
timeout=10, verify=False)
print r.url
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError):
try:
print(u'Network problem, waiting 3 seconds before retrying')
time.sleep(3)
r = requests.get('https://kyfw.12306.cn/otn/leftTicket/queryA', params=parameters,
timeout=8, verify=False)
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError) as e:
print e
fromCity = stationCodeNameMap[from_station_code]
toCity = stationCodeNameMap[to_station_code]
saveData = {'fromCity': u'' + fromCity,
'toCity': u'' + toCity,
'queryDate': queryDate}
with io.open(PWD + '/config.json', 'w', encoding='utf-8') as outfile:
outfile.write(unicode(json.dumps(saveData, ensure_ascii=False)))
outfile.close()
startWB.save(SAVE_PATH + START_EXCEL_NAME)
arriveWB.save(SAVE_PATH + ARRIVE_EXCEL_NAME)
sumWB.save(SAVE_PATH + SUM_EXCEL_NAME)
print "Sleeping 23 seconds before restarting the program"
time.sleep(23)
start()
return RET_OK
print(">>>>>>>>>>>>>>>>>>>>>")
obj = r.json()
if hasKeys(obj, ['status', 'httpstatus', 'data']):
return obj
else:
print(u'4: query failed')
print obj
if hasKeys(obj, ['messages']):
dumpObj(obj['messages'])
return RET_ERR
# Initialize stationNameCodeMap and stationCodeNameMap from cityLookUp
def initStation():
f = codecs.open(PWD + "/cityLookUp.txt", "r")
data = f.readline()
f.close()
station_list = data.split('@')
if len(station_list) < 1:
print(u'Failed to initialize station database: malformed data')
return None
station_list = station_list[1:]
for station in station_list:
items = station.split('|') # bji|北京|BJP|beijing|bj|2
if len(items) < 5:
print(u'Ignoring invalid station: %s' % (items))
continue
stationNameCodeMap[items[1].decode('utf-8')] = items[2]
stationCodeNameMap[items[2]] = items[1]
return stationNameCodeMap
def getStationByName(name):
if stationNameCodeMap.has_key(name):
return stationNameCodeMap[name]
else:
print name + " not found"
return 0
def initCitiesList():
f = open(PWD + "/cities.txt", "r")
global cityList
cityList = f.readlines()
f.close()
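# start: main driver. Resumes from the fromCity/toCity/queryDate saved in config.json,
# then walks every ordered city pair, queries the train count, tallies trains by
# departure and arrival hour, and writes the results into the start/arrive/sum workbooks
# after each pair.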
def start():
#print(u"hello, do not open start.xls or arrive.xls in the result directory while the program is running; if they are open, close them now")
initStation()
initCitiesList()
initialWB()
with open(PWD + '/config.json', 'r') as f:
crashSave = json.load(f)
startCityFind = False
toCityFind = False
queryDate = crashSave['queryDate']
print queryDate
for fromCityIndex in range(len(cityList)):
fromCity = cityList[fromCityIndex]
fromCity = fromCity.split('\r')[0].decode('utf-8')
if startCityFind:
pass
else:
if fromCity == crashSave['fromCity']:
startCityFind = True
else:
continue
result = fromCity + "\t:"
for toCityIndex in range(len(cityList)):
toCity = cityList[toCityIndex]
toCity = toCity.split('\r')[0].decode('utf-8')
print toCity
if toCityFind:
pass
else:
if toCity == crashSave['toCity']:
toCityFind = True
else:
continue
initMap()
if fromCity == toCity:
number = '/'
else:
if fromCity == '莱芜':
number = 0
for fromCity2 in ['莱芜东', '莱芜西']:
fromStationCode = getStationByName(fromCity2)
toStationCode = getStationByName(toCity)
if toStationCode == 0:
number = '/'
break
else:
trains = queryTickets(queryDate, fromStationCode, toStationCode)
if trains == RET_ERR or trains == RET_OK:
break
number += len(trains['data'])
countByTime(trains['data'])
elif toCity == '莱芜':
number = 0
for toCity2 in ['莱芜东', '莱芜西']:
fromStationCode = getStationByName(fromCity)
toStationCode = getStationByName(toCity2)
if fromStationCode == 0:
number = '/'
break
else:
trains = queryTickets(queryDate, fromStationCode, toStationCode)
if trains == RET_ERR or trains == RET_OK:
break
number += len(trains['data'])
countByTime(trains['data'])
else:
fromStationCode = getStationByName(fromCity)
toStationCode = getStationByName(toCity)
print fromStationCode
if fromStationCode == 0 or toStationCode == 0:
number = '/'
else:
trains = queryTickets(queryDate, fromStationCode, toStationCode)
if trains == RET_ERR or trains == RET_OK:
break
number = len(trains['data'])
countByTime(trains['data'])
print (fromCity + "->" + toCity + ":" + str(number))
result = result + str(number) + ','
print startTimeMap
print arriveTimeMap
for hour in range(EXCEL_SHEET_NUMBER):
startSheet = startWB.get_sheet_by_name(str(hour))
arriveSheet = arriveWB.get_sheet_by_name(str(hour))
sumSheet = sumWB.get_sheet_by_name("sum")
if number == '/':
startSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value='/')
arriveSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value='/')
sumSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value='/')
else:
startSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value=str(startTimeMap[hour]))
arriveSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value=str(arriveTimeMap[hour]))
sumSheet.cell(row=fromCityIndex + 2, column=toCityIndex + 2, value=str(number))
startWB.save(filename=SAVE_PATH + START_EXCEL_NAME)
arriveWB.save(filename=SAVE_PATH + ARRIVE_EXCEL_NAME)
sumWB.save(filename=SAVE_PATH + SUM_EXCEL_NAME)
print("*********all over*************")
def countByTime(data):
for train in data:
if train['queryLeftNewDTO']['controlled_train_flag'] == '0': # filter out suspended/controlled trains
startHour = int(train['queryLeftNewDTO']['start_time'].split(':')[0])
startTimeMap[startHour] += 1
arriveHour = int(train['queryLeftNewDTO']['arrive_time'].split(':')[0])
arriveTimeMap[arriveHour] += 1
# Check whether start.xls and arrive.xls already exist in the result directory; load them if they do, otherwise create them
def initialWB():
files = [f for f in os.listdir(SAVE_PATH) if os.path.isfile(SAVE_PATH + f)]
startInitial = False
arriveInitial = False
sumInitial = False
for f in files:
if f == ARRIVE_EXCEL_NAME:
global arriveWB
arriveWB = load_workbook(filename=SAVE_PATH + ARRIVE_EXCEL_NAME)
arriveInitial = True
elif f == START_EXCEL_NAME:
global startWB
startWB = load_workbook(filename=SAVE_PATH + START_EXCEL_NAME)
startInitial = True
elif f == SUM_EXCEL_NAME:
global sumWB
sumWB = load_workbook(filename=SAVE_PATH + SUM_EXCEL_NAME)
sumInitial = True
if not startInitial:
writeSheetCityName(startWB)
if not arriveInitial:
writeSheetCityName(arriveWB)
if not sumInitial:
ws = sumWB.create_sheet(title="sum")
for i in range(len(cityList)):
ws.cell(column=1, row=i + 2, value=cityList[i].split('\n')[0])
ws.cell(column=i + 2, row=1, value=cityList[i].split('\n')[0])
# Write the city names into the first row and first column of every sheet in the workbook
def writeSheetCityName(wb):
for index in range(0, EXCEL_SHEET_NUMBER):
ws = wb.create_sheet(title=str(index))
for i in range(len(cityList)):
ws.cell(column=1, row=i + 2, value=cityList[i].split('\n')[0])
ws.cell(column=i + 2, row=1, value=cityList[i].split('\n')[0])
def initMap():
for index in range(EXCEL_SHEET_NUMBER):
startTimeMap[index] = 0
arriveTimeMap[index] = 0
if __name__ == '__main__':
start()
|
hitjackma/12306Spider
|
query.py
|
Python
|
gpl-3.0
| 11,179
|
#!/usr/bin/env python3
import os, sys
sys.path.append('../modules')
import numpy as np
import matplotlib.pyplot as plt
import raytracing as rt
import visualize as vis
import ray_utilities
if __name__ == '__main__':
# Create a spectrometer using a simple 4f system and diffraction grating
f = 50 # Focal length of all lenses
aperture = 25.4 # Size of lenses
npoints = 3 # Number of light source points
nrays = 5 # Number of light rays per point
ymax = -0.1 # Limit of source plane. Controls spectral resolution
ymin = 0.1
ngroves = 600 # Grove density of diffraction grating
# Simulate system for these wavelengths
lmb = list(np.linspace(400, 700, 11)*1e-9)
components = []
rays = []
image_plane = -200
nrays = 20
# Create three scene points
scene = np.zeros((2, npoints))
scene[1, :] = np.linspace(ymin, ymax, npoints)
# Place a collimation lens
components.append(rt.Lens(f=f,
aperture=aperture,
pos=[f, 0],
theta=0))
# Place a diffraction grating
components.append(rt.Grating(ngroves=ngroves,
aperture=aperture,
pos=[2*f, 0],
theta=0))
# Place a lens such that the central wavelength is centered on the sensor
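# Design angle from the first-order grating equation sin(theta) = lambda / d, where the
# groove spacing is d = 1e-3 / ngroves metres (assuming ngroves is given in grooves per mm),
# evaluated at the central wavelength lmb[len(lmb)//2].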
theta_design = np.arcsin(lmb[len(lmb)//2]/(1e-3/ngroves))
x1 = 2*f + f*np.cos(-theta_design)
y1 = f*np.sin(-theta_design)
components.append(rt.Lens(f=f,
aperture=aperture,
pos=[x1, y1],
theta=theta_design))
# Place a sensor
x2 = x1 + f*np.cos(-theta_design)
y2 = y1 + f*np.sin(-theta_design)
components.append(rt.Sensor(aperture=aperture,
pos=[x2, y2],
theta=theta_design))
# Get the initial rays
[rays, ptdict, colors] = ray_utilities.initial_rays(scene,
components[0],
nrays)
# Create rainbow colors
colors = vis.get_colors(len(lmb), nrays*npoints, cmap='rainbow')
# Create a new canvas
canvas = vis.Canvas([-5, 4.1*f], [-2*aperture, 2*aperture])
# Draw the components
canvas.draw_components(components)
# Draw the rays for each wavelength
for idx in range(len(lmb)):
canvas.draw_rays(rt.propagate_rays(components, rays,
lmb=lmb[idx]), colors[idx],
linewidth=0.2)
# Show the system
canvas.show()
# Save a copy
canvas.save('grating.png')
|
vishwa91/OptSys
|
examples/grating.py
|
Python
|
mit
| 2,828
|
from django.contrib import admin
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', auth_views.login, {'template_name': 'sheepguard/login.html'}),
url(r'^accounts/logout', auth_views.logout, {'next_page': '/'}, name='logout'),
url(r'^sheep/', include('sheep.urls', namespace='sheep')),
url(r'^customers', include('customer.urls', namespace='customers')),
url(r'^$', 'SheepGuard.views.index'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
BaileySN/SheepGuard
|
SheepGuard/urls.py
|
Python
|
gpl-3.0
| 808
|
import matplotlib.pyplot as plt
plt.style.use('science')
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
fig, host = plt.subplots()
fig.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(par2)
# Second, show the right spine.
par2.spines["right"].set_visible(True)
p1, = host.plot([0, 1, 2], [0, 1, 2], "b-", label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-", label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-", label="Velocity")
host.set_xlim(0, 2)
host.set_ylim(0, 2)
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
lines = [p1, p2, p3]
host.legend(lines, [l.get_label() for l in lines])
plt.show()
|
InnovArul/codesmart
|
computer_vision/paperplots/example.py
|
Python
|
gpl-2.0
| 1,602
|
import random
from zen.graph import Graph
from zen.digraph import DiGraph
from zen.exceptions import ZenException
__all__ = ['local_attachment']
def local_attachment(n, m, r, **kwargs):
"""
Generate a random graph using the local attachment model.
**Args**:
* ``n`` (int): the number of nodes to add to the graph
* ``m`` (int): the number of edges a new node will add to the graph
* ``r`` (int): the number of edges (of the ``m``) that a node will add to uniformly selected random nodes.
All others will be added to neighbors of the ``r`` selected nodes.
**KwArgs**:
* ``seed [=-1]`` (int): a seed for the random number generator
* ``graph [=None]`` (:py:class:`zen.DiGraph`): the graph that will be populated. If the graph is ``None``,
then a new graph will be created.
**Returns**:
:py:class:`zen.DiGraph`. The graph generated.
.. note::
Source: M. O. Jackson and B. O. Rogers "Meeting strangers and friends of friends: How random are social networks?" The American Economic Review, 2007.
"""
seed = kwargs.pop('seed',None)
graph = kwargs.pop('graph',None)
if graph is not None and not graph.is_directed():
raise ZenException, 'The graph provided must be directed'
if graph is not None and len(graph) > 0:
raise ZenException, 'The graph provided is not empty'
if len(kwargs) > 0:
raise ZenException, 'Unknown arguments: %s' % ', '.join(kwargs.keys())
if type(r) != int:
raise ZenException, 'r must be an integer'
elif r < 1:
raise ZenException, 'r must be 1 or larger'
if seed is None:
seed = -1
if seed >= 0:
random.seed(seed)
#####
# build the initial graph
G = graph
if G is None:
G = DiGraph()
# populate with nodes
for i in range(m+1):
G.add_node(i)
# according to Jackson's paper, all initial nodes have m neighbors.
for i in range(m+1):
for j in range(m+1):
if j != i:
G.add_edge(j,i)
######
# Build the rest of the graph
node_list = list(range(m+1))
for i in range(m+1,n):
G.add_node(i)
# pick random neighbors (the parents)
parents = random.sample(node_list,r)
# pick neighbors from the parents' neighborhoods
potentials = set()
for parent in parents:  # distinct name so the node-count parameter n is not shadowed
potentials.update(G.out_neighbors(parent))
potentials.difference_update(parents)
nsize = min([m-r,len(potentials)])
neighbors = random.sample(potentials,nsize)
# connect
for v in (parents + neighbors):
G.add_edge(i,v)
node_list.append(i)
# done
return G
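# Minimal usage sketch (assuming the zen package is importable):
#   G = local_attachment(1000, 6, 2, seed=42)
# builds a 1000-node directed graph in which each new node adds 6 edges, 2 of them to
# uniformly chosen existing nodes and the rest to neighbors of those nodes.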
if __name__ == '__main__':
from zen.drawing import UbigraphRenderer
G = DiGraph()
ur = UbigraphRenderer('http://localhost:20738/RPC2',event_delay=0.5,graph=G)
G = local_attachment(100, 6, 4, graph=G)
|
networkdynamics/zenlib
|
src/zen/generating/local.py
|
Python
|
bsd-3-clause
| 2,686
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from mock import MagicMock
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants.util.dirutil import safe_rmtree
from pants_test.tasks.task_test_base import TaskTestBase
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
# TODO (tdesai) Issue-240: Use JvmToolTaskTestBase for ScroogeGenTest
class ScroogeGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ScroogeGen
@property
def alias_groups(self):
return BuildFileAliases(targets={'java_thrift_library': JavaThriftLibrary})
def setUp(self):
super(ScroogeGenTest, self).setUp()
self.task_outdir = os.path.join(self.build_root, 'scrooge', 'gen-java')
def tearDown(self):
super(ScroogeGenTest, self).tearDown()
safe_rmtree(self.task_outdir)
def test_validate_compiler_configs(self):
# Set synthetic defaults for the global scope.
self.set_options_for_scope('thrift-defaults',
compiler='unchecked',
language='uniform',
rpc_style='async')
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='one',
sources=[],
dependencies=[],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='two',
sources=[],
dependencies=[':one'],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='three',
sources=[],
dependencies=[':one'],
rpc_style='finagle',
)
'''))
target = self.target('test_validate:one')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._validate_compiler_configs([self.target('test_validate:one')])
task._validate_compiler_configs([self.target('test_validate:two')])
with self.assertRaises(TaskError):
task._validate_compiler_configs([self.target('test_validate:three')])
def test_scala(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='scala',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/example/Example.scala')]
self._test_help(build_string, ScalaLibrary, sources)
def test_android(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='android',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/android_example/Example.java')]
self._test_help(build_string, JavaLibrary, sources)
def _test_help(self, build_string, library_type, sources):
contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('test_smoke', dedent(build_string))
target = self.target('test_smoke:a')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._declares_service = lambda source: False
task._outdir = MagicMock()
task._outdir.return_value = self.task_outdir
task.gen = MagicMock()
task.gen.return_value = {'test_smoke/a.thrift': sources}
saved_add_new_target = Context.add_new_target
try:
mock = MagicMock()
Context.add_new_target = mock
task.execute()
self.assertEquals(1, mock.call_count)
_, call_kwargs = mock.call_args
self.assertEquals(call_kwargs['target_type'], library_type)
self.assertEquals(call_kwargs['dependencies'], OrderedSet())
self.assertEquals(call_kwargs['provides'], None)
self.assertEquals(call_kwargs['sources'], [])
self.assertEquals(call_kwargs['derived_from'], target)
finally:
Context.add_new_target = saved_add_new_target
|
jtrobec/pants
|
contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py
|
Python
|
apache-2.0
| 4,867
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudJob(Model):
"""An Azure Batch job.
:param id: A string that uniquely identifies the job within the account.
The ID is case-preserving and case-insensitive (that is, you may not have
two IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the job.
:type display_name: str
:param uses_task_dependencies: Whether tasks in the job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param url: The URL of the job.
:type url: str
:param e_tag: The ETag of the job. This is an opaque string. You can use
it to detect whether the job has changed between requests. In particular,
you can be pass the ETag when updating a job to specify that your changes
should take effect only if nobody else has modified the job in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the job. This is the last
time at which the job level data, such as the job state or priority,
changed. It does not factor in task-level changes such as adding new tasks
or tasks changing state.
:type last_modified: datetime
:param creation_time: The creation time of the job.
:type creation_time: datetime
:param state: The current state of the job. Possible values include:
'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed',
'deleting'
:type state: str or ~azure.batch.models.JobState
:param state_transition_time: The time at which the job entered its
current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the job. This property is not
set if the job is in its initial Active state. Possible values include:
'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed',
'deleting'
:type previous_state: str or ~azure.batch.models.JobState
:param previous_state_transition_time: The time at which the job entered
its previous state. This property is not set if the job is in its initial
Active state.
:type previous_state_transition_time: datetime
:param priority: The priority of the job. Priority values can range from
-1000 to 1000, with -1000 being the lowest priority and 1000 being the
highest priority. The default value is 0.
:type priority: int
:param constraints: The execution constraints for the job.
:type constraints: ~azure.batch.models.JobConstraints
:param job_manager_task: Details of a Job Manager task to be launched when
the job is started.
:type job_manager_task: ~azure.batch.models.JobManagerTask
:param job_preparation_task: The Job Preparation task. The Job Preparation
task is a special task run on each node before any other task of the job.
:type job_preparation_task: ~azure.batch.models.JobPreparationTask
:param job_release_task: The Job Release task. The Job Release task is a
special task run at the end of the job on each node that has run any other
task of the job.
:type job_release_task: ~azure.batch.models.JobReleaseTask
:param common_environment_settings: The list of common environment
variable settings. These environment variables are set for all tasks in
the job (including the Job Manager, Job Preparation and Job Release
tasks). Individual tasks can override an environment setting specified
here by specifying the same setting name with a different value.
:type common_environment_settings:
list[~azure.batch.models.EnvironmentSetting]
:param pool_info: The pool settings associated with the job.
:type pool_info: ~azure.batch.models.PoolInformation
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in the job are in the completed state. noAction - do
nothing. The job remains active unless terminated or disabled by some
other means. terminateJob - terminate the job. The job's terminateReason
is set to 'AllTasksComplete'. The default is noAction. Possible values
include: 'noAction', 'terminateJob'
:type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
:param on_task_failure: The action the Batch service should take when any
task in the job fails. A task is considered to have failed if has a
failureInfo. A failureInfo is set if the task completes with a non-zero
exit code after exhausting its retry count, or if there was an error
starting the task, for example due to a resource file download error.
noAction - do nothing. performExitOptionsJobAction - take the action
associated with the task exit condition in the task's exitConditions
collection. (This may still result in no action being taken, if that is
what the task specifies.) The default is noAction. Possible values
include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or ~azure.batch.models.OnTaskFailure
:param metadata: A list of name-value pairs associated with the job as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
:param execution_info: The execution information for the job.
:type execution_info: ~azure.batch.models.JobExecutionInformation
:param stats: Resource usage statistics for the entire lifetime of the
job.
:type stats: ~azure.batch.models.JobStatistics
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'state': {'key': 'state', 'type': 'JobState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'JobState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'priority': {'key': 'priority', 'type': 'int'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'},
'stats': {'key': 'stats', 'type': 'JobStatistics'},
}
def __init__(self, id=None, display_name=None, uses_task_dependencies=None, url=None, e_tag=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, execution_info=None, stats=None):
self.id = id
self.display_name = display_name
self.uses_task_dependencies = uses_task_dependencies
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.priority = priority
self.constraints = constraints
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.pool_info = pool_info
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.metadata = metadata
self.execution_info = execution_info
self.stats = stats
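# Usage sketch (hypothetical values): a job description can be constructed locally, e.g.
#   job = CloudJob(id='job-1', display_name='nightly batch', priority=0)
# and the msrest Model machinery then (de)serializes it according to _attribute_map.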
|
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/cloud_job.py
|
Python
|
mit
| 9,338
|
class Solution:
# @return a boolean
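# DP over prefixes: opt[i][j] is True iff s1[:i] and s2[:j] can interleave to form s3[:i+j];
# a cell becomes True when the last character of s3[:i+j] matches the end of s1 (extending
# opt[i-1][j]) or the end of s2 (extending opt[i][j-1]).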
def isInterleave(self, s1, s2, s3):
if len(s1) + len(s2) != len(s3):
return False
opt = [[False for i in xrange(len(s2)+1)] for i in xrange(len(s1)+1)]
opt[0][0] = True
for i in xrange(1, len(s1)+1):
opt[i][0] = opt[i-1][0] and s1[i-1] == s3[i-1]
for i in xrange(1, len(s2)+1):
opt[0][i] = opt[0][i-1] and s2[i-1] == s3[i-1]
for i in xrange(1, len(s1)+1):
for j in xrange(1, len(s2)+1):
last_of_s3 = s3[i+j-1]
last_of_s2 = s2[j-1]
last_of_s1 = s1[i-1]
if (last_of_s1 == last_of_s3 and opt[i-1][j]) or (last_of_s2 == last_of_s3 and opt[i][j-1]):
opt[i][j] = True
return opt[len(s1)][len(s2)]
if __name__ == "__main__":
solution = Solution()
print solution.isInterleave("aabcc", "dbbca", "aadbbcbcac")
print solution.isInterleave("aabcc", "dbbca", "aadbbbaccc")
print solution.isInterleave("bbbbbabbbbabaababaaaabbababbaaabbabbaaabaaaaababbbababbbbbabbbbababbabaabababbbaabababababbbaaababaa", "babaaaabbababbbabbbbaabaabbaabbbbaabaaabaababaaaabaaabbaaabaaaabaabaabbbbbbbbbbbabaaabbababbabbabaab", "babbbabbbaaabbababbbbababaabbabaabaaabbbbabbbaaabbbaaaaabbbbaabbaaabababbaaaaaabababbababaababbababbbababbbbaaaabaabbabbaaaaabbabbaaaabbbaabaaabaababaababbaaabbbbbabbbbaabbabaabbbbabaaabbababbabbabbab")
|
lsingal/leetcode
|
python/dynamic_programming/InterleavingString.py
|
Python
|
mit
| 1,501
|
import _plotly_utils.basevalidators
class SizerefValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="sizeref", parent_name="scatterpolar.marker", **kwargs
):
super(SizerefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterpolar/marker/_sizeref.py
|
Python
|
mit
| 468
|
from datetime import datetime, time
from hashlib import md5
class CrawlUtils(object):
def __init__(self):
super(CrawlUtils, self).__init__()
@classmethod
def get_guid(cls, _url):
"""Generates a unique identifier for a given item."""
# hash based solely on the url field
return md5(_url).hexdigest()
|
trujunzhang/djzhang-targets
|
cwitune/cwitune/utils/crawl_utils.py
|
Python
|
mit
| 349
|
#! /usr/bin/env python
# Solving a 9x9 Sudoku puzzle (54 numbers missing).
#
# Copyright (C) 2013 Efstathios Chatzikyriakidis <contact@efxa.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
from SudokuSolver import SudokuGA
except (ImportError) as error:
import sys, os
sys.exit('{0}: {1}.'.format(os.path.basename(__file__), error))
parameters = {
'populationSize': 8000,
'mutationProbability': 0.012,
'tournamentSize': 5,
'candidateGenotype': [[2, 0, 0, 8, 0, 0, 0, 7, 0],
[0, 0, 9, 0, 0, 1, 0, 0, 3],
[0, 1, 0, 3, 0, 0, 0, 2, 0],
[0, 6, 0, 0, 2, 0, 0, 9, 0],
[0, 0, 5, 0, 0, 0, 7, 0, 8],
[0, 0, 3, 0, 8, 0, 6, 0, 0],
[6, 0, 0, 5, 0, 0, 0, 1, 0],
[0, 9, 0, 0, 1, 0, 4, 0, 0],
[0, 0, 4, 0, 3, 0, 0, 0, 8]]
}
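# In candidateGenotype, 0 marks an empty cell for the GA to fill in
# (54 of the 81 cells, matching the "54 numbers missing" note above).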
ga = SudokuGA(parameters)
solution = ga.evolution()
print solution
|
rfribeiro/sudoku-ga
|
examples/ga/example-6.py
|
Python
|
gpl-3.0
| 1,499
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser.exceptions import BrowserHTTPNotFound
from weboob.browser import PagesBrowser, URL
from .pages import RecipePage, ResultsPage
__all__ = ['MarmitonBrowser']
class MarmitonBrowser(PagesBrowser):
BASEURL = 'http://www.marmiton.org/'
search = URL('recettes/recherche.aspx\?aqt=(?P<pattern>.*)', ResultsPage)
recipe = URL('recettes/recette_(?P<id>.*).aspx', RecipePage)
def iter_recipes(self, pattern):
return self.search.go(pattern=pattern).iter_recipes()
def get_recipe(self, id, recipe=None):
try:
recipe = self.recipe.go(id=id).get_recipe(obj=recipe)
comments = list(self.page.get_comments())
if comments:
recipe.comments = comments
return recipe
except BrowserHTTPNotFound:
return
|
laurent-george/weboob
|
modules/marmiton/browser.py
|
Python
|
agpl-3.0
| 1,560
|
# -*- coding: utf-8 -*-
import numpy as np
import sys
import cPickle  # used by save() and load()
from scipy.optimize import fmin_bfgs
class logisticRegression:
def __init__(self):
# do nothing particularly
pass
def fit(self,data,label):
# data is to be given in a two dimensional numpy array (nData,nVariables)
# label is to be given in an one dimensional numpy array (nData,) which contains value of either 0 or 1
self.data = np.hstack((np.ones((data.shape[0],1)),data))
self.label = label
self.nData = data.shape[0]
self.nEta = data.shape[1]+1
self.eta = fmin_bfgs(self.costFunction,np.zeros(self.nEta),fprime=self.gradient)
def predict(self,data):
# data is to be given in a two dimensional numpy array (nData,nVariables)
data = np.hstack((np.ones((data.shape[0],1)),data))
return self.sigmoid(np.dot(data,self.eta))
def sigmoid(self,z):
# define sigmoid function
return 1.0 / ( 1.0+np.exp(-z) )
def costFunction(self,eta):
# return cost to minimize
prob = self.sigmoid(np.dot(self.data,eta))
prob_ = 1.0-prob
np.place(prob,prob==0.0,sys.float_info.min)
np.place(prob_,prob_==0.0,sys.float_info.min)
self.cost = np.dot(self.label,np.log(prob))
self.cost += np.dot((1.0-self.label),np.log(prob_))
return -self.cost
def gradient(self,eta):
# return gradient of cost function
grad = np.zeros(self.nEta)
for i in range(grad.shape[0]):
grad[i] = ( (self.sigmoid(np.dot(self.data,eta))-self.label)*self.data[:,i] ).sum()
return grad
def summary(self):
# calculate deviance, residual deviance, and Akaike Information Criterion (AIC)
deviance = -2*self.cost
fullmodel = -np.log(1.0)*self.nData
rDeviance = deviance-fullmodel
aic = deviance+2*self.nEta
# display information
print "Estimated Regression Coefficient Values:"
print self.eta
print "Log Likelihood:\t\t %f (df=%d)"%(self.cost,self.nEta)
print "Residual Deviance:\t %f"%(rDeviance)
print "AIC:\t\t\t %f"%(aic)
def save(self,name):
# save object as a file
with open(name,"wb") as output:
cPickle.dump(self.__dict__,output,protocol=cPickle.HIGHEST_PROTOCOL)
def load(self,name):
# load object from a file
with open(name,"rb") as input:
self.__dict__.update(cPickle.load(input))
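# Minimal usage sketch (toy data, Python 2, names as defined above):
#   X = np.random.randn(100, 2)
#   y = (X[:, 0] + X[:, 1] > 0).astype(int)
#   model = logisticRegression()
#   model.fit(X, y)
#   probs = model.predict(X)
#   model.summary()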
|
kyoheiotsuka/logisticRegression
|
logisticRegression.py
|
Python
|
mit
| 2,207
|
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def sanitizer(name):
"""
Sanitizes a string supposed to be an entity name. That is,
invalid characters like slashes are substituted with underscores.
:param name: A string representing the name.
:returns: The sanitized name.
:rtype: str
"""
return name.replace("/", "_")
def check(name):
"""
Checks whether a string needs to be sanitized.
:param name: The name.
:returns: True if the name is valid, false otherwise.
:rtype: bool
"""
if isinstance(name, bytes):
name = name.decode()
return "/" not in name
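# Example: check("a/b") returns False because of the slash, while sanitizer("a/b")
# returns "a_b".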
|
stoewer/nixpy
|
nixio/pycore/util/names.py
|
Python
|
bsd-3-clause
| 967
|
from __future__ import with_statement
import datetime
import os
import unittest
from datafeed.exchange import *
from datafeed.providers.yahoo import *
class YahooSecurityTest(unittest.TestCase):
def test_abbr_sha(self):
s = YahooSecurity(SH(), '600028')
self.assertEqual(s._abbr, 'SS')
def test_abbr_she(self):
s = YahooSecurity(SZ(), '000001')
self.assertEqual(s._abbr, 'SZ')
def test_yahoo_id(self):
s = YahooSecurity(SH(), '600028')
self.assertEqual(str(s), '600028.SS')
def test_abbr_to_exchange(self):
ex = YahooSecurity.get_exchange_from_abbr("SS")
self.assertEqual(ex, SH())
def test_ss_abbr(self):
ret = YahooSecurity.from_string('600028.SS')
self.assertEqual(ret.exchange, SH())
self.assertEqual(ret.symbol, '600028')
self.assertEqual(str(ret), '600028.SS')
class YahooReportTest(unittest.TestCase):
_RAW_DATA = '''"GOOG",533.89,"5/3/2011","4:00pm",-4.67,537.13,542.01,529.63,2081574
"AAPL",348.20,"5/3/2011","4:00pm",+1.92,347.91,349.89,345.62,11198607
"600028.SS",8.58,"5/4/2011","1:47am",-0.10,8.64,8.67,8.55,23045288'''
def test_yahoo_report(self):
ret = YahooReport.parse(self._RAW_DATA)
i = 0
for r in ret:
if i == 0:
self.assertEqual(r.security.exchange, YahooNA())
self.assertEqual(r.security.symbol, 'GOOG')
self.assertEqual(str(r.date), "2011-05-03")
self.assertEqual(r.time.hour, 16)
self.assertEqual(r.time.minute, 0)
self.assertEqual(r.price, 533.89)
self.assertEqual(r.change, -4.67)
self.assertEqual(r.open, 537.13)
self.assertEqual(r.high, 542.01)
self.assertEqual(r.low, 529.63)
self.assertEqual(r.volume, 2081574)
if i == 2:
self.assertEqual(r.security.exchange, SH())
self.assertEqual(r.security.symbol, '600028')
i += 1
self.assertEqual(i, 3)
class YahooDayTest(unittest.TestCase):
def test_parse_day(self):
path = os.path.dirname(os.path.realpath(__file__))
f = open(os.path.join(path, 'yahoo_tables.csv'), 'r')
data = f.read()
f.close()
security = YahooSecurity(YahooNA(), 'GOOG')
iters = YahooDay.parse(security, data)
i = 0
for ohlc in iters:
if i == 0:
# 2011-05-03,537.13,542.01,529.63,533.89,2081500,533.89
self.assertEqual(str(ohlc.date), "2011-05-03")
self.assertEqual(ohlc.open, 537.13)
self.assertEqual(ohlc.high, 542.01)
self.assertEqual(ohlc.low, 529.63)
self.assertEqual(ohlc.close, 533.89)
self.assertEqual(ohlc.volume, 2081500)
self.assertEqual(ohlc.adjclose, 533.89)
i += 1
class YahooReportFetcherTest(unittest.TestCase):
def test_init(self):
f = YahooReportFetcher()
self.assertEqual(f._base_url, 'http://download.finance.yahoo.com/d/quotes.csv')
def test_init_with_arguments(self):
f = YahooReportFetcher(time_out=10, request_size=50)
self.assertEqual(f._time_out, 10)
self.assertEqual(f._request_size, 50)
def test_init_with_wrong_arguments(self):
self.assertRaises(AssertionError,
YahooReportFetcher,
request_size=200)
def test_fetch(self):
f = YahooReportFetcher(request_size=2)
s1 = YahooSecurity(YahooNA(), 'GOOG')
s2 = YahooSecurity(YahooNA(), 'AAPL')
s3 = YahooSecurity(SH(), '000001')
def callback(body):
qs = YahooReport.parse(body)
for quote in qs:
if quote.security == s3:
# something must wrong if SSE Composite Index goes down to 100
self.assertTrue(quote.price > 100)
f.fetch(s1, s2, s3,
callback=callback)
class YahooDayFetcherTest(unittest.TestCase):
def test_init(self):
f = YahooDayFetcher()
self.assertEqual(f._base_url, 'http://ichart.finance.yahoo.com/table.csv')
self.assertEqual(f._time_out, 20)
self.assertEqual(f._max_clients, 10)
def test_init_with_wrong_arguments(self):
self.assertRaises(AssertionError,
YahooReportFetcher,
max_clients=20)
def test_fetch(self):
f = YahooDayFetcher()
s1 = YahooSecurity(YahooNA(), 'GOOG')
s2 = YahooSecurity(YahooNA(), 'AAPL')
def callback(security, body):
iters = YahooDay.parse(security, body)
i = 0
for ohlc in iters:
self.assertTrue(ohlc.security in (s1, s2))
if i == 0 and ohlc.security == s1:
self.assertEqual(str(ohlc.date), "2011-04-28")
self.assertEqual(ohlc.open, 538.06)
self.assertEqual(ohlc.high, 539.25)
self.assertEqual(ohlc.low, 534.08)
self.assertEqual(ohlc.close, 537.97)
self.assertEqual(ohlc.volume, 2037400.0)
i += 1
start_date = datetime.datetime.strptime("2011-04-01", "%Y-%m-%d").date()
end_date = datetime.datetime.strptime("2011-04-28", "%Y-%m-%d").date()
f.fetch(s1, s2,
callback=callback,
start_date=start_date,
end_date=end_date)
class YahooNewsFetcherTest(unittest.TestCase):
def test_fetch(self):
f = YahooNewsFetcher()
s1 = YahooSecurity(YahooNA(), 'GOOG')
s2 = YahooSecurity(YahooNA(), 'AAPL')
s3 = YahooSecurity(SH(), '000001')
def callback(security, response):
self.assertTrue(response.body.startswith('<?xml'))
f.fetch(s1, callback=callback)
if __name__ == '__main__':
unittest.main()
|
yinhm/datafeed
|
datafeed/providers/tests/test_yahoo.py
|
Python
|
apache-2.0
| 6,087
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABACVjr0OgzAMhF8lUsriiGID4ecURX2QCMasnjLx8A2dWqoO9XLW2b7PxfxRUcCsQhDWntCzDoSBdSSMrJ5Q3paLivBNeQWtygtoUZ5Bs/IEmpR9+Mov"
+ "e7G/6Wgl1EwT4sp5pghwivDeNZxTlQ627NHZkGDD7trjPHKvv4f2fnuYA53zPmVuqsB88CNE8nZ2Wxapo3jFb1fjCcDxR0M7AQAA")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<45 and y<7):
return g[y*45 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<45 and y<7):
g[y*45 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
global t0
gw(0,0,1)
gw(1,0,1)
gw(2,0,2)
gw(3,0,6)
gw(4,0,24)
gw(5,0,120)
gw(6,0,720)
gw(7,0,5040)
gw(8,0,40320)
gw(9,0,362880)
gw(1,1,0)
t0=362880
sa(gr(9,0)*7)
sa(gr(9,0)*7)
sa(0)
sa(gr((gr(9,0)*7)%10,0))
sa((gr(9,0)*7)/10)
sa((gr(9,0)*7)/10)
return 1
def _1():
return (2)if(sp()!=0)else(3)
def _2():
sa(gr(sr()%10,0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()/10);
sa(sr());
return (10)if(sp()!=0)else(3)
def _3():
sa(sp()+sp());
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sr());
return (9)if(sp()!=0)else(4)
def _4():
global t0
global t1
sa(sp()+sp());
t0=sp()
sa(sp()-t0)
t1=sp()
return (5)if((t1)!=0)else(8)
def _5():
sa(sp()-1)
sa(sr());
return (6)if(sp()!=0)else(7)
def _6():
sa(sr());
sa(sr());
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(gr(sr()%10,0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()/10);
sa(sr());
return 1
def _7():
sys.stdout.write(str(gr(1,1)-3)+" ")
sys.stdout.flush()
sa(sr());
sp();
sp();
return 11
def _8():
gw(1,1,sr()+gr(1,1))
return 5
def _9():
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 3
def _10():
sa(gr(sr()%10,0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()/10);
sa(sr());
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10]
c=0
while c<11:
c=m[c]()
|
Mikescher/Project-Euler_Befunge
|
compiled/Python2/Euler_Problem-034.py
|
Python
|
mit
| 2,461
|
from django.db import models
# Create your models here.
class Poll(models.Model):
name = models.CharField(max_length=255, verbose_name="Poll Name!!", help_text="It's the name. OF YOUR POLL!")
int_field = models.IntegerField(help_text="For no reason an int field, put a number in it!")
|
pombredanne/django-rest-angular
|
polls/models.py
|
Python
|
gpl-3.0
| 294
|
"""The tests for the MQTT discovery."""
from pathlib import Path
import re
from unittest.mock import AsyncMock, patch
import pytest
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.components.mqtt.abbreviations import (
ABBREVIATIONS,
DEVICE_ABBREVIATIONS,
)
from homeassistant.components.mqtt.discovery import ALREADY_DISCOVERED, async_start
from homeassistant.const import (
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from tests.common import (
async_fire_mqtt_message,
mock_device_registry,
mock_entity_platform,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_DISCOVERY: False}],
)
async def test_subscribing_config_topic(hass, mqtt_mock):
"""Test setting up discovery."""
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
discovery_topic = "homeassistant"
await async_start(hass, discovery_topic, entry)
call_args1 = mqtt_mock.async_subscribe.mock_calls[0][1]
assert call_args1[2] == 0
call_args2 = mqtt_mock.async_subscribe.mock_calls[1][1]
assert call_args2[2] == 0
topics = [call_args1[0], call_args2[0]]
assert discovery_topic + "/+/+/config" in topics
assert discovery_topic + "/+/+/+/config" in topics
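# The two subscriptions cover discovery topics without and with a node_id:
# <prefix>/<component>/<object_id>/config and <prefix>/<component>/<node_id>/<object_id>/config.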
@pytest.mark.parametrize(
"topic, log",
[
("homeassistant/binary_sensor/bla/not_config", False),
("homeassistant/binary_sensor/rörkrökare/config", True),
],
)
async def test_invalid_topic(hass, mqtt_mock, caplog, topic, log):
"""Test sending to invalid topic."""
with patch(
"homeassistant.components.mqtt.discovery.async_dispatcher_send"
) as mock_dispatcher_send:
mock_dispatcher_send = AsyncMock(return_value=None)
async_fire_mqtt_message(hass, topic, "{}")
await hass.async_block_till_done()
assert not mock_dispatcher_send.called
if log:
assert (
f"Received message on illegal discovery topic '{topic}'" in caplog.text
)
else:
assert "Received message on illegal discovery topic'" not in caplog.text
caplog.clear()
async def test_invalid_json(hass, mqtt_mock, caplog):
"""Test sending in invalid JSON."""
with patch(
"homeassistant.components.mqtt.discovery.async_dispatcher_send"
) as mock_dispatcher_send:
mock_dispatcher_send = AsyncMock(return_value=None)
async_fire_mqtt_message(
hass, "homeassistant/binary_sensor/bla/config", "not json"
)
await hass.async_block_till_done()
assert "Unable to parse JSON" in caplog.text
assert not mock_dispatcher_send.called
async def test_only_valid_components(hass, mqtt_mock, caplog):
"""Test for a valid component."""
with patch(
"homeassistant.components.mqtt.discovery.async_dispatcher_send"
) as mock_dispatcher_send:
invalid_component = "timer"
mock_dispatcher_send = AsyncMock(return_value=None)
async_fire_mqtt_message(
hass, f"homeassistant/{invalid_component}/bla/config", "{}"
)
await hass.async_block_till_done()
assert f"Integration {invalid_component} is not supported" in caplog.text
assert not mock_dispatcher_send.called
async def test_correct_config_discovery(hass, mqtt_mock, caplog):
"""Test sending in correct JSON."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
assert state.name == "Beer"
assert ("binary_sensor", "bla") in hass.data[ALREADY_DISCOVERED]
async def test_discover_fan(hass, mqtt_mock, caplog):
"""Test discovering an MQTT fan."""
async_fire_mqtt_message(
hass,
"homeassistant/fan/bla/config",
('{ "name": "Beer",' ' "command_topic": "test_topic" }'),
)
await hass.async_block_till_done()
state = hass.states.get("fan.beer")
assert state is not None
assert state.name == "Beer"
assert ("fan", "bla") in hass.data[ALREADY_DISCOVERED]
async def test_discover_climate(hass, mqtt_mock, caplog):
"""Test discovering an MQTT climate component."""
data = (
'{ "name": "ClimateTest",'
' "current_temperature_topic": "climate/bla/current_temp",'
' "temperature_command_topic": "climate/bla/target_temp" }'
)
async_fire_mqtt_message(hass, "homeassistant/climate/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("climate.ClimateTest")
assert state is not None
assert state.name == "ClimateTest"
assert ("climate", "bla") in hass.data[ALREADY_DISCOVERED]
async def test_discover_alarm_control_panel(hass, mqtt_mock, caplog):
"""Test discovering an MQTT alarm control panel component."""
data = (
'{ "name": "AlarmControlPanelTest",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(hass, "homeassistant/alarm_control_panel/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("alarm_control_panel.AlarmControlPanelTest")
assert state is not None
assert state.name == "AlarmControlPanelTest"
assert ("alarm_control_panel", "bla") in hass.data[ALREADY_DISCOVERED]
@pytest.mark.parametrize(
"topic, config, entity_id, name, domain",
[
(
"homeassistant/alarm_control_panel/object/bla/config",
'{ "name": "Hello World 1", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"alarm_control_panel.hello_id",
"Hello World 1",
"alarm_control_panel",
),
(
"homeassistant/binary_sensor/object/bla/config",
'{ "name": "Hello World 2", "obj_id": "hello_id", "state_topic": "test-topic" }',
"binary_sensor.hello_id",
"Hello World 2",
"binary_sensor",
),
(
"homeassistant/button/object/bla/config",
'{ "name": "Hello World button", "obj_id": "hello_id", "command_topic": "test-topic" }',
"button.hello_id",
"Hello World button",
"button",
),
(
"homeassistant/camera/object/bla/config",
'{ "name": "Hello World 3", "obj_id": "hello_id", "state_topic": "test-topic", "topic": "test-topic" }',
"camera.hello_id",
"Hello World 3",
"camera",
),
(
"homeassistant/climate/object/bla/config",
'{ "name": "Hello World 4", "obj_id": "hello_id", "state_topic": "test-topic" }',
"climate.hello_id",
"Hello World 4",
"climate",
),
(
"homeassistant/cover/object/bla/config",
'{ "name": "Hello World 5", "obj_id": "hello_id", "state_topic": "test-topic" }',
"cover.hello_id",
"Hello World 5",
"cover",
),
(
"homeassistant/fan/object/bla/config",
'{ "name": "Hello World 6", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"fan.hello_id",
"Hello World 6",
"fan",
),
(
"homeassistant/humidifier/object/bla/config",
'{ "name": "Hello World 7", "obj_id": "hello_id", "state_topic": "test-topic", "target_humidity_command_topic": "test-topic", "command_topic": "test-topic" }',
"humidifier.hello_id",
"Hello World 7",
"humidifier",
),
(
"homeassistant/number/object/bla/config",
'{ "name": "Hello World 8", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"number.hello_id",
"Hello World 8",
"number",
),
(
"homeassistant/scene/object/bla/config",
'{ "name": "Hello World 9", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"scene.hello_id",
"Hello World 9",
"scene",
),
(
"homeassistant/select/object/bla/config",
'{ "name": "Hello World 10", "obj_id": "hello_id", "state_topic": "test-topic", "options": [ "opt1", "opt2" ], "command_topic": "test-topic" }',
"select.hello_id",
"Hello World 10",
"select",
),
(
"homeassistant/sensor/object/bla/config",
'{ "name": "Hello World 11", "obj_id": "hello_id", "state_topic": "test-topic" }',
"sensor.hello_id",
"Hello World 11",
"sensor",
),
(
"homeassistant/switch/object/bla/config",
'{ "name": "Hello World 12", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"switch.hello_id",
"Hello World 12",
"switch",
),
(
"homeassistant/light/object/bla/config",
'{ "name": "Hello World 13", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"light.hello_id",
"Hello World 13",
"light",
),
(
"homeassistant/light/object/bla/config",
'{ "name": "Hello World 14", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic", "schema": "json" }',
"light.hello_id",
"Hello World 14",
"light",
),
(
"homeassistant/light/object/bla/config",
'{ "name": "Hello World 15", "obj_id": "hello_id", "state_topic": "test-topic", "command_off_template": "template", "command_on_template": "template", "command_topic": "test-topic", "schema": "template" }',
"light.hello_id",
"Hello World 15",
"light",
),
(
"homeassistant/vacuum/object/bla/config",
'{ "name": "Hello World 16", "obj_id": "hello_id", "state_topic": "test-topic", "schema": "state" }',
"vacuum.hello_id",
"Hello World 16",
"vacuum",
),
(
"homeassistant/vacuum/object/bla/config",
'{ "name": "Hello World 17", "obj_id": "hello_id", "state_topic": "test-topic", "schema": "legacy" }',
"vacuum.hello_id",
"Hello World 17",
"vacuum",
),
(
"homeassistant/lock/object/bla/config",
'{ "name": "Hello World 18", "obj_id": "hello_id", "state_topic": "test-topic", "command_topic": "test-topic" }',
"lock.hello_id",
"Hello World 18",
"lock",
),
(
"homeassistant/device_tracker/object/bla/config",
'{ "name": "Hello World 19", "obj_id": "hello_id", "state_topic": "test-topic" }',
"device_tracker.hello_id",
"Hello World 19",
"device_tracker",
),
],
)
async def test_discovery_with_object_id(
hass, mqtt_mock, caplog, topic, config, entity_id, name, domain
):
"""Test discovering an MQTT entity with object_id."""
async_fire_mqtt_message(hass, topic, config)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert state.name == name
assert (domain, "object bla") in hass.data[ALREADY_DISCOVERED]
async def test_discovery_incl_nodeid(hass, mqtt_mock, caplog):
"""Test sending in correct JSON with optional node_id included."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/my_node_id/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
assert state.name == "Beer"
assert ("binary_sensor", "my_node_id bla") in hass.data[ALREADY_DISCOVERED]
async def test_non_duplicate_discovery(hass, mqtt_mock, caplog):
"""Test for a non duplicate component."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
state_duplicate = hass.states.get("binary_sensor.beer1")
assert state is not None
assert state.name == "Beer"
assert state_duplicate is None
assert "Component has already been discovered: binary_sensor bla" in caplog.text
async def test_removal(hass, mqtt_mock, caplog):
"""Test removal of component through empty discovery message."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is None
async def test_rediscover(hass, mqtt_mock, caplog):
"""Test rediscover of removed component."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is None
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
async def test_rapid_rediscover(hass, mqtt_mock, caplog):
"""Test immediate rediscover of removed component."""
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
assert len(events) == 1
# Removal immediately followed by rediscover
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Milk", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("binary_sensor")) == 1
state = hass.states.get("binary_sensor.milk")
assert state is not None
assert len(events) == 5
# Remove the entity
assert events[1].data["entity_id"] == "binary_sensor.beer"
assert events[1].data["new_state"] is None
# Add the entity
assert events[2].data["entity_id"] == "binary_sensor.beer"
assert events[2].data["old_state"] is None
# Remove the entity
assert events[3].data["entity_id"] == "binary_sensor.beer"
assert events[3].data["new_state"] is None
# Add the entity
assert events[4].data["entity_id"] == "binary_sensor.milk"
assert events[4].data["old_state"] is None
async def test_rapid_rediscover_unique(hass, mqtt_mock, caplog):
"""Test immediate rediscover of removed component."""
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla2/config",
'{ "name": "Ale", "state_topic": "test-topic", "unique_id": "very_unique" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.ale")
assert state is not None
assert len(events) == 1
# Duplicate unique_id, immediately followed by correct unique_id
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic", "unique_id": "very_unique" }',
)
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic", "unique_id": "even_uniquer" }',
)
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Milk", "state_topic": "test-topic", "unique_id": "even_uniquer" }',
)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("binary_sensor")) == 2
state = hass.states.get("binary_sensor.ale")
assert state is not None
state = hass.states.get("binary_sensor.milk")
assert state is not None
assert len(events) == 4
# Add the entity
assert events[1].data["entity_id"] == "binary_sensor.beer"
assert events[1].data["old_state"] is None
# Remove the entity
assert events[2].data["entity_id"] == "binary_sensor.beer"
assert events[2].data["new_state"] is None
# Add the entity
assert events[3].data["entity_id"] == "binary_sensor.milk"
assert events[3].data["old_state"] is None
async def test_duplicate_removal(hass, mqtt_mock, caplog):
"""Test for a non duplicate component."""
async_fire_mqtt_message(
hass,
"homeassistant/binary_sensor/bla/config",
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
await hass.async_block_till_done()
assert "Component has already been discovered: binary_sensor bla" in caplog.text
caplog.clear()
async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla/config", "")
await hass.async_block_till_done()
assert "Component has already been discovered: binary_sensor bla" not in caplog.text
async def test_cleanup_device(hass, device_reg, entity_reg, mqtt_mock):
"""Test discvered device is cleaned up when removed from registry."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
# Verify device and registry entries are created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is not None
entity_entry = entity_reg.async_get("sensor.mqtt_sensor")
assert entity_entry is not None
state = hass.states.get("sensor.mqtt_sensor")
assert state is not None
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
await hass.async_block_till_done()
# Verify device and registry entries are cleared
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is None
entity_entry = entity_reg.async_get("sensor.mqtt_sensor")
assert entity_entry is None
# Verify state is removed
state = hass.states.get("sensor.mqtt_sensor")
assert state is None
await hass.async_block_till_done()
# Verify retained discovery topic has been cleared
mqtt_mock.async_publish.assert_called_once_with(
"homeassistant/sensor/bla/config", "", 0, True
)
async def test_discovery_expansion(hass, mqtt_mock, caplog):
"""Test expansion of abbreviated discovery payload."""
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "availability": ['
" {"
' "topic":"~/avail_item1",'
' "payload_available": "available",'
' "payload_not_available": "not_available"'
" },"
" {"
' "topic":"avail_item2/~",'
' "payload_available": "available",'
' "payload_not_available": "not_available"'
" }"
" ],"
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"None",'
' "sa":"default_area"'
" }"
"}"
)
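    # "~" is replaced by the base topic during expansion: "test_topic/~" becomes
    # "test_topic/some/base/topic" and "~/avail_item1" becomes
    # "some/base/topic/avail_item1", matching the topics published below.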
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "avail_item2/some/base/topic", "available")
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state is not None
assert state.name == "DiscoveryExpansionTest1"
assert ("switch", "bla") in hass.data[ALREADY_DISCOVERED]
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_topic/some/base/topic", "ON")
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", "not_available")
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async def test_discovery_expansion_2(hass, mqtt_mock, caplog):
"""Test expansion of abbreviated discovery payload."""
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "availability": {'
' "topic":"~/avail_item1",'
' "payload_available": "available",'
' "payload_not_available": "not_available"'
" },"
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"None",'
' "sa":"default_area"'
" }"
"}"
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", "available")
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state is not None
assert state.name == "DiscoveryExpansionTest1"
assert ("switch", "bla") in hass.data[ALREADY_DISCOVERED]
assert state.state == STATE_OFF
@pytest.mark.no_fail_on_log_exception
async def test_discovery_expansion_3(hass, mqtt_mock, caplog):
"""Test expansion of broken discovery payload."""
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "availability": "incorrect",'
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"None",'
' "sa":"default_area"'
" }"
"}"
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
assert hass.states.get("switch.DiscoveryExpansionTest1") is None
    # Make sure the malformed availability data does not trip up discovery;
    # instead, a schema validation error should be logged
assert (
"voluptuous.error.MultipleInvalid: expected a dictionary @ data['availability'][0]"
in caplog.text
)
async def test_discovery_expansion_without_encoding_and_value_template_1(
hass, mqtt_mock, caplog
):
"""Test expansion of raw availability payload with a template as list."""
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "encoding":"",'
' "availability": [{'
' "topic":"~/avail_item1",'
' "payload_available": "1",'
' "payload_not_available": "0",'
' "value_template":"{{value|unpack(\'b\')}}"'
" }],"
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"None",'
' "sa":"default_area"'
" }"
"}"
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", b"\x01")
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state is not None
assert state.name == "DiscoveryExpansionTest1"
assert ("switch", "bla") in hass.data[ALREADY_DISCOVERED]
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", b"\x00")
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async def test_discovery_expansion_without_encoding_and_value_template_2(
hass, mqtt_mock, caplog
):
"""Test expansion of raw availability payload with a template directly."""
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "availability_topic":"~/avail_item1",'
' "payload_available": "1",'
' "payload_not_available": "0",'
' "encoding":"",'
' "availability_template":"{{ value | unpack(\'b\') }}",'
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"None",'
' "sa":"default_area"'
" }"
"}"
)
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", b"\x01")
await hass.async_block_till_done()
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state is not None
assert state.name == "DiscoveryExpansionTest1"
assert ("switch", "bla") in hass.data[ALREADY_DISCOVERED]
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "some/base/topic/avail_item1", b"\x00")
state = hass.states.get("switch.DiscoveryExpansionTest1")
assert state.state == STATE_UNAVAILABLE
ABBREVIATIONS_WHITE_LIST = [
# MQTT client/server/trigger settings
"CONF_BIRTH_MESSAGE",
"CONF_BROKER",
"CONF_CERTIFICATE",
"CONF_CLIENT_CERT",
"CONF_CLIENT_ID",
"CONF_CLIENT_KEY",
"CONF_DISCOVERY",
"CONF_DISCOVERY_ID",
"CONF_DISCOVERY_PREFIX",
"CONF_EMBEDDED",
"CONF_KEEPALIVE",
"CONF_TLS_INSECURE",
"CONF_TLS_VERSION",
"CONF_WILL_MESSAGE",
# Undocumented device configuration
"CONF_DEPRECATED_VIA_HUB",
"CONF_VIA_DEVICE",
# Already short
"CONF_FAN_MODE_LIST",
"CONF_HOLD_LIST",
"CONF_HS",
"CONF_MODE_LIST",
"CONF_PRECISION",
"CONF_QOS",
"CONF_SCHEMA",
"CONF_SWING_MODE_LIST",
"CONF_TEMP_STEP",
]
async def test_missing_discover_abbreviations(hass, mqtt_mock, caplog):
"""Check MQTT platforms for missing abbreviations."""
missing = []
regex = re.compile(r"(CONF_[a-zA-Z\d_]*) *= *[\'\"]([a-zA-Z\d_]*)[\'\"]")
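    # For example, a line like CONF_STATE_TOPIC = "state_topic" yields the tuple
    # ("CONF_STATE_TOPIC", "state_topic"); match[1] must then appear in one of the
    # abbreviation tables unless match[0] is whitelisted above.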
for fil in Path(mqtt.__file__).parent.rglob("*.py"):
if fil.name == "trigger.py":
continue
with open(fil) as file:
matches = re.findall(regex, file.read())
for match in matches:
if (
match[1] not in ABBREVIATIONS.values()
and match[1] not in DEVICE_ABBREVIATIONS.values()
and match[0] not in ABBREVIATIONS_WHITE_LIST
):
missing.append(
"{}: no abbreviation for {} ({})".format(
fil, match[1], match[0]
)
)
assert not missing
async def test_no_implicit_state_topic_switch(hass, mqtt_mock, caplog):
"""Test no implicit state topic for switch."""
data = '{ "name": "Test1",' ' "command_topic": "cmnd"' "}"
async_fire_mqtt_message(hass, "homeassistant/switch/bla/config", data)
await hass.async_block_till_done()
assert "implicit state_topic is deprecated" not in caplog.text
state = hass.states.get("switch.Test1")
assert state is not None
assert state.name == "Test1"
assert ("switch", "bla") in hass.data[ALREADY_DISCOVERED]
assert state.state == "off"
assert state.attributes["assumed_state"] is True
async_fire_mqtt_message(hass, "homeassistant/switch/bla/state", "ON")
state = hass.states.get("switch.Test1")
assert state.state == "off"
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_DISCOVERY_PREFIX: "my_home/homeassistant/register",
}
],
)
async def test_complex_discovery_topic_prefix(hass, mqtt_mock, caplog):
"""Tests handling of discovery topic prefix with multiple slashes."""
async_fire_mqtt_message(
hass,
("my_home/homeassistant/register/binary_sensor/node1/object1/config"),
'{ "name": "Beer", "state_topic": "test-topic" }',
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.beer")
assert state is not None
assert state.name == "Beer"
assert ("binary_sensor", "node1 object1") in hass.data[ALREADY_DISCOVERED]
async def test_mqtt_integration_discovery_subscribe_unsubscribe(
hass, mqtt_client_mock, mqtt_mock
):
"""Check MQTT integration discovery subscribe and unsubscribe."""
mock_entity_platform(hass, "config_flow.comp", None)
entry = hass.config_entries.async_entries("mqtt")[0]
mqtt_mock().connected = True
with patch(
"homeassistant.components.mqtt.discovery.async_get_mqtt",
return_value={"comp": ["comp/discovery/#"]},
):
await async_start(hass, "homeassistant", entry)
await hass.async_block_till_done()
mqtt_client_mock.subscribe.assert_any_call("comp/discovery/#", 0)
assert not mqtt_client_mock.unsubscribe.called
class TestFlow(config_entries.ConfigFlow):
"""Test flow."""
async def async_step_mqtt(self, discovery_info):
"""Test mqtt step."""
return self.async_abort(reason="already_configured")
with patch.dict(config_entries.HANDLERS, {"comp": TestFlow}):
mqtt_client_mock.subscribe.assert_any_call("comp/discovery/#", 0)
assert not mqtt_client_mock.unsubscribe.called
async_fire_mqtt_message(hass, "comp/discovery/bla/config", "")
await hass.async_block_till_done()
mqtt_client_mock.unsubscribe.assert_called_once_with("comp/discovery/#")
mqtt_client_mock.unsubscribe.reset_mock()
async_fire_mqtt_message(hass, "comp/discovery/bla/config", "")
await hass.async_block_till_done()
assert not mqtt_client_mock.unsubscribe.called
async def test_mqtt_discovery_unsubscribe_once(hass, mqtt_client_mock, mqtt_mock):
"""Check MQTT integration discovery unsubscribe once."""
mock_entity_platform(hass, "config_flow.comp", None)
entry = hass.config_entries.async_entries("mqtt")[0]
mqtt_mock().connected = True
with patch(
"homeassistant.components.mqtt.discovery.async_get_mqtt",
return_value={"comp": ["comp/discovery/#"]},
):
await async_start(hass, "homeassistant", entry)
await hass.async_block_till_done()
mqtt_client_mock.subscribe.assert_any_call("comp/discovery/#", 0)
assert not mqtt_client_mock.unsubscribe.called
class TestFlow(config_entries.ConfigFlow):
"""Test flow."""
async def async_step_mqtt(self, discovery_info):
"""Test mqtt step."""
return self.async_abort(reason="already_configured")
with patch.dict(config_entries.HANDLERS, {"comp": TestFlow}):
async_fire_mqtt_message(hass, "comp/discovery/bla/config", "")
async_fire_mqtt_message(hass, "comp/discovery/bla/config", "")
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_client_mock.unsubscribe.assert_called_once_with("comp/discovery/#")
| jawilson/home-assistant | tests/components/mqtt/test_discovery.py | Python | apache-2.0 | 34,443 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that encodes (decodes) nested structures into (from) protos.
The intended use is to serialize everything needed to restore a `Function` that
was saved into a SavedModel. This may include concrete function inputs and
outputs, signatures, function specs, etc.
Example use:
coder = nested_structure_coder.StructureCoder()
# Encode into proto.
signature_proto = coder.encode_structure(function.input_signature)
# Decode into a Python object.
restored_signature = coder.decode_proto(signature_proto)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.distribute import values
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import row_partition
from tensorflow.python.util import compat
from tensorflow.python.util.compat import collections_abc
class NotEncodableError(Exception):
"""Error raised when a coder cannot encode an object."""
class StructureCoder(object):
"""Encoder and decoder for nested structures into protos."""
_codecs = []
@classmethod
def register_codec(cls, x):
cls._codecs.append(x)
@classmethod
def _get_encoders(cls):
return [(c.can_encode, c.do_encode) for c in cls._codecs]
@classmethod
def _get_decoders(cls):
return [(c.can_decode, c.do_decode) for c in cls._codecs]
def _map_structure(self, pyobj, coders):
for can, do in coders:
if can(pyobj):
recursion_fn = functools.partial(self._map_structure, coders=coders)
return do(pyobj, recursion_fn)
raise NotEncodableError(
"No encoder for object [%s] of type [%s]." % (str(pyobj), type(pyobj)))
def encode_structure(self, nested_structure):
"""Encodes nested structures composed of encodable types into a proto.
Args:
nested_structure: Structure to encode.
Returns:
Encoded proto.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(nested_structure, self._get_encoders())
def can_encode(self, nested_structure):
"""Determines whether a nested structure can be encoded into a proto.
Args:
nested_structure: Structure to encode.
Returns:
      True if the nested structure can be encoded.
"""
try:
self.encode_structure(nested_structure)
except NotEncodableError:
return False
return True
def decode_proto(self, proto):
"""Decodes proto representing a nested structure.
Args:
proto: Proto to decode.
Returns:
Decoded structure.
Raises:
NotEncodableError: For values for which there are no encoders.
"""
return self._map_structure(proto, self._get_decoders())
class _ListCodec(object):
"""Codec for lists."""
def can_encode(self, pyobj):
return isinstance(pyobj, list)
def do_encode(self, list_value, encode_fn):
encoded_list = struct_pb2.StructuredValue()
encoded_list.list_value.CopyFrom(struct_pb2.ListValue())
for element in list_value:
encoded_list.list_value.values.add().CopyFrom(encode_fn(element))
return encoded_list
def can_decode(self, value):
return value.HasField("list_value")
def do_decode(self, value, decode_fn):
return [decode_fn(element) for element in value.list_value.values]
StructureCoder.register_codec(_ListCodec())
def _is_tuple(obj):
return not _is_named_tuple(obj) and isinstance(obj, tuple)
def _is_named_tuple(instance):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
Returns:
True if `instance` is a `namedtuple`.
"""
if not isinstance(instance, tuple):
return False
return (hasattr(instance, "_fields") and
isinstance(instance._fields, collections_abc.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields))
class _TupleCodec(object):
"""Codec for tuples."""
def can_encode(self, pyobj):
return _is_tuple(pyobj)
def do_encode(self, tuple_value, encode_fn):
encoded_tuple = struct_pb2.StructuredValue()
encoded_tuple.tuple_value.CopyFrom(struct_pb2.TupleValue())
for element in tuple_value:
encoded_tuple.tuple_value.values.add().CopyFrom(encode_fn(element))
return encoded_tuple
def can_decode(self, value):
return value.HasField("tuple_value")
def do_decode(self, value, decode_fn):
return tuple(decode_fn(element) for element in value.tuple_value.values)
StructureCoder.register_codec(_TupleCodec())
class _DictCodec(object):
"""Codec for dicts."""
def can_encode(self, pyobj):
return isinstance(pyobj, dict)
def do_encode(self, dict_value, encode_fn):
encoded_dict = struct_pb2.StructuredValue()
encoded_dict.dict_value.CopyFrom(struct_pb2.DictValue())
for key, value in dict_value.items():
encoded_dict.dict_value.fields[key].CopyFrom(encode_fn(value))
return encoded_dict
def can_decode(self, value):
return value.HasField("dict_value")
def do_decode(self, value, decode_fn):
return {key: decode_fn(val) for key, val in value.dict_value.fields.items()}
StructureCoder.register_codec(_DictCodec())
class _NamedTupleCodec(object):
"""Codec for namedtuples.
Encoding and decoding a namedtuple reconstructs a namedtuple with a different
actual Python type, but with the same `typename` and `fields`.
"""
def can_encode(self, pyobj):
return _is_named_tuple(pyobj)
def do_encode(self, named_tuple_value, encode_fn):
encoded_named_tuple = struct_pb2.StructuredValue()
encoded_named_tuple.named_tuple_value.CopyFrom(struct_pb2.NamedTupleValue())
encoded_named_tuple.named_tuple_value.name = \
named_tuple_value.__class__.__name__
for key in named_tuple_value._fields:
pair = encoded_named_tuple.named_tuple_value.values.add()
pair.key = key
pair.value.CopyFrom(encode_fn(named_tuple_value._asdict()[key]))
return encoded_named_tuple
def can_decode(self, value):
return value.HasField("named_tuple_value")
def do_decode(self, value, decode_fn):
key_value_pairs = value.named_tuple_value.values
items = [(pair.key, decode_fn(pair.value)) for pair in key_value_pairs]
named_tuple_type = collections.namedtuple(value.named_tuple_value.name,
[item[0] for item in items])
return named_tuple_type(**dict(items))
StructureCoder.register_codec(_NamedTupleCodec())
class _Float64Codec(object):
"""Codec for floats."""
def can_encode(self, pyobj):
return isinstance(pyobj, float)
def do_encode(self, float64_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.float64_value = float64_value
return value
def can_decode(self, value):
return value.HasField("float64_value")
def do_decode(self, value, decode_fn):
del decode_fn
return value.float64_value
StructureCoder.register_codec(_Float64Codec())
class _Int64Codec(object):
"""Codec for Python integers (limited to 64 bit values)."""
def can_encode(self, pyobj):
return not isinstance(pyobj, bool) and isinstance(pyobj, int)
def do_encode(self, int_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.int64_value = int_value
return value
def can_decode(self, value):
return value.HasField("int64_value")
def do_decode(self, value, decode_fn):
del decode_fn
return int(value.int64_value)
StructureCoder.register_codec(_Int64Codec())
class _StringCodec(object):
"""Codec for strings.
See StructuredValue.string_value in proto/struct.proto for more detailed
explanation.
"""
def can_encode(self, pyobj):
return isinstance(pyobj, str)
def do_encode(self, string_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.string_value = string_value
return value
def can_decode(self, value):
return value.HasField("string_value")
def do_decode(self, value, decode_fn):
del decode_fn
return compat.as_str(value.string_value)
StructureCoder.register_codec(_StringCodec())
class _NoneCodec(object):
"""Codec for None."""
def can_encode(self, pyobj):
return pyobj is None
def do_encode(self, none_value, encode_fn):
del encode_fn, none_value
value = struct_pb2.StructuredValue()
value.none_value.CopyFrom(struct_pb2.NoneValue())
return value
def can_decode(self, value):
return value.HasField("none_value")
def do_decode(self, value, decode_fn):
del decode_fn, value
return None
StructureCoder.register_codec(_NoneCodec())
class _BoolCodec(object):
"""Codec for booleans."""
def can_encode(self, pyobj):
return isinstance(pyobj, bool)
def do_encode(self, bool_value, encode_fn):
del encode_fn
value = struct_pb2.StructuredValue()
value.bool_value = bool_value
return value
def can_decode(self, value):
return value.HasField("bool_value")
def do_decode(self, value, decode_fn):
del decode_fn
return value.bool_value
StructureCoder.register_codec(_BoolCodec())
class _TensorShapeCodec(object):
"""Codec for `TensorShape`."""
def can_encode(self, pyobj):
return isinstance(pyobj, tensor_shape.TensorShape)
def do_encode(self, tensor_shape_value, encode_fn):
del encode_fn
encoded_tensor_shape = struct_pb2.StructuredValue()
encoded_tensor_shape.tensor_shape_value.CopyFrom(
tensor_shape_value.as_proto())
return encoded_tensor_shape
def can_decode(self, value):
return value.HasField("tensor_shape_value")
def do_decode(self, value, decode_fn):
del decode_fn
return tensor_shape.TensorShape(value.tensor_shape_value)
StructureCoder.register_codec(_TensorShapeCodec())
class _TensorTypeCodec(object):
"""Codec for `TensorType`."""
def can_encode(self, pyobj):
return isinstance(pyobj, dtypes.DType)
def do_encode(self, tensor_dtype_value, encode_fn):
del encode_fn
encoded_tensor_type = struct_pb2.StructuredValue()
encoded_tensor_type.tensor_dtype_value = tensor_dtype_value.as_datatype_enum
return encoded_tensor_type
def can_decode(self, value):
return value.HasField("tensor_dtype_value")
def do_decode(self, value, decode_fn):
del decode_fn
return dtypes.DType(value.tensor_dtype_value)
StructureCoder.register_codec(_TensorTypeCodec())
class _TensorSpecCodec(object):
"""Codec for `TensorSpec`."""
def can_encode(self, pyobj):
# BoundedTensorSpec has its own decoder.
return (isinstance(pyobj, tensor_spec.TensorSpec) and
not isinstance(pyobj, tensor_spec.BoundedTensorSpec))
def do_encode(self, tensor_spec_value, encode_fn):
encoded_tensor_spec = struct_pb2.StructuredValue()
encoded_tensor_spec.tensor_spec_value.CopyFrom(
struct_pb2.TensorSpecProto(
shape=encode_fn(tensor_spec_value.shape).tensor_shape_value,
dtype=encode_fn(tensor_spec_value.dtype).tensor_dtype_value,
name=tensor_spec_value.name))
return encoded_tensor_spec
def can_decode(self, value):
return value.HasField("tensor_spec_value")
def do_decode(self, value, decode_fn):
name = value.tensor_spec_value.name
return tensor_spec.TensorSpec(
shape=decode_fn(
struct_pb2.StructuredValue(
tensor_shape_value=value.tensor_spec_value.shape)),
dtype=decode_fn(
struct_pb2.StructuredValue(
tensor_dtype_value=value.tensor_spec_value.dtype)),
name=(name if name else None))
StructureCoder.register_codec(_TensorSpecCodec())
class _BoundedTensorSpecCodec(object):
"""Codec for `BoundedTensorSpec`."""
def can_encode(self, pyobj):
return isinstance(pyobj, tensor_spec.BoundedTensorSpec)
def do_encode(self, bounded_tensor_spec_value, encode_fn):
"""Returns an encoded proto for the given `tf.BoundedTensorSpec`."""
encoded_bounded_tensor_spec = struct_pb2.StructuredValue()
encoded_bounded_tensor_spec.bounded_tensor_spec_value.CopyFrom(
struct_pb2.BoundedTensorSpecProto(
shape=encode_fn(bounded_tensor_spec_value.shape).tensor_shape_value,
dtype=encode_fn(bounded_tensor_spec_value.dtype).tensor_dtype_value,
name=bounded_tensor_spec_value.name,
minimum=tensor_util.make_tensor_proto(
bounded_tensor_spec_value.minimum),
maximum=tensor_util.make_tensor_proto(
bounded_tensor_spec_value.maximum)))
return encoded_bounded_tensor_spec
def can_decode(self, value):
return value.HasField("bounded_tensor_spec_value")
def do_decode(self, value, decode_fn):
btsv = value.bounded_tensor_spec_value
name = btsv.name
return tensor_spec.BoundedTensorSpec(
shape=decode_fn(
struct_pb2.StructuredValue(tensor_shape_value=btsv.shape)),
dtype=decode_fn(
struct_pb2.StructuredValue(tensor_dtype_value=btsv.dtype)),
minimum=tensor_util.MakeNdarray(btsv.minimum),
maximum=tensor_util.MakeNdarray(btsv.maximum),
name=(name if name else None))
StructureCoder.register_codec(_BoundedTensorSpecCodec())
class _TypeSpecCodec(object):
"""Codec for `tf.TypeSpec`."""
# Mapping from enum value to type (TypeSpec subclass).
TYPE_SPEC_CLASS_FROM_PROTO = {
struct_pb2.TypeSpecProto.SPARSE_TENSOR_SPEC:
sparse_tensor.SparseTensorSpec,
struct_pb2.TypeSpecProto.INDEXED_SLICES_SPEC:
indexed_slices.IndexedSlicesSpec,
struct_pb2.TypeSpecProto.RAGGED_TENSOR_SPEC:
ragged_tensor.RaggedTensorSpec,
struct_pb2.TypeSpecProto.TENSOR_ARRAY_SPEC:
tensor_array_ops.TensorArraySpec,
struct_pb2.TypeSpecProto.DATA_DATASET_SPEC:
dataset_ops.DatasetSpec,
struct_pb2.TypeSpecProto.DATA_ITERATOR_SPEC:
iterator_ops.IteratorSpec,
struct_pb2.TypeSpecProto.OPTIONAL_SPEC:
optional_ops.OptionalSpec,
struct_pb2.TypeSpecProto.PER_REPLICA_SPEC:
values.PerReplicaSpec,
struct_pb2.TypeSpecProto.VARIABLE_SPEC:
resource_variable_ops.VariableSpec,
struct_pb2.TypeSpecProto.ROW_PARTITION_SPEC:
row_partition.RowPartitionSpec,
struct_pb2.TypeSpecProto.NDARRAY_SPEC:
np_arrays.NdarraySpec,
}
# Mapping from type (TypeSpec subclass) to enum value.
TYPE_SPEC_CLASS_TO_PROTO = dict(
(cls, enum) for (enum, cls) in TYPE_SPEC_CLASS_FROM_PROTO.items())
def can_encode(self, pyobj):
# pylint: disable=unidiomatic-typecheck
return type(pyobj) in self.TYPE_SPEC_CLASS_TO_PROTO
def do_encode(self, type_spec_value, encode_fn):
"""Returns an encoded proto for the given `tf.TypeSpec`."""
type_spec_class = self.TYPE_SPEC_CLASS_TO_PROTO[type(type_spec_value)]
type_state = type_spec_value._serialize() # pylint: disable=protected-access
encoded_type_spec = struct_pb2.StructuredValue()
encoded_type_spec.type_spec_value.CopyFrom(
struct_pb2.TypeSpecProto(
type_spec_class=type_spec_class,
type_state=encode_fn(type_state),
type_spec_class_name=type(type_spec_value).__name__))
return encoded_type_spec
def can_decode(self, value):
return value.HasField("type_spec_value")
def do_decode(self, value, decode_fn):
"""Returns the `tf.TypeSpec` encoded by the proto `value`."""
type_spec_proto = value.type_spec_value
type_spec_class_enum = type_spec_proto.type_spec_class
if type_spec_class_enum not in self.TYPE_SPEC_CLASS_FROM_PROTO:
raise ValueError(
"The type '%s' is not supported by this version of TensorFlow. "
"(The object you are loading must have been created with a newer "
"version of TensorFlow.)" % type_spec_proto.type_spec_class_name)
type_spec_class = self.TYPE_SPEC_CLASS_FROM_PROTO[type_spec_class_enum]
# pylint: disable=protected-access
return type_spec_class._deserialize(decode_fn(type_spec_proto.type_state))
StructureCoder.register_codec(_TypeSpecCodec())
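# Illustrative round-trip (not part of the original module): with the codecs
# registered above, plain Python containers and scalars encode into a
# StructuredValue proto and decode back unchanged. Guarded so it only runs when
# this file is executed directly (requires the TensorFlow protos to be built).
if __name__ == "__main__":
  _coder = StructureCoder()
  _proto = _coder.encode_structure({"weights": [1.0, 2.0], "flag": (True, None)})
  assert _coder.decode_proto(_proto) == {"weights": [1.0, 2.0], "flag": (True, None)}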
| karllessard/tensorflow | tensorflow/python/saved_model/nested_structure_coder.py | Python | apache-2.0 | 17,543 |
#!/usr/bin/env python
import unittest
from markdown_to_pdf import *
def local_link_callback(link, markdown_filepath):
if link[0] == '#':
return '#parsed-local-link'
else:
return '#parsed-remote-link'
class TestMarkdownToPdf( unittest.TestCase ):
def test_is_a_markdown_header(self):
self.assertEqual( is_a_markdown_header( 'No header', '' ), False )
self.assertEqual( is_a_markdown_header( '# Header 1', '' ), True )
self.assertEqual( is_a_markdown_header( ' # Header 1', '' ), False )
self.assertEqual( is_a_markdown_header( '## Header 2', '' ), True )
self.assertEqual( is_a_markdown_header( '### Header 3', '' ), True )
self.assertEqual( is_a_markdown_header( '#### Header 4', '' ), True )
self.assertEqual( is_a_markdown_header( '##### Header 5', '' ), True )
self.assertEqual( is_a_markdown_header( '###### Header 6', '' ), True )
self.assertEqual( is_a_markdown_header( 'Header', '==' ), True )
self.assertEqual( is_a_markdown_header( 'Header', '--' ), True )
self.assertEqual( is_a_markdown_header( '', '--' ), False )
self.assertEqual( is_a_markdown_header( '', '' ), False )
self.assertEqual( is_a_markdown_header( '-', '-'), False )
self.assertEqual( is_a_markdown_header( ' ', '- Win'), False )
self.assertEqual( is_a_markdown_header( '- Win', '- Win'), False )
self.assertEqual( is_a_markdown_header( '`# Not a header 1`', '' ), False )
self.assertEqual( is_a_markdown_header( '<a name="section2"></a>`Timestamp` Interceptor','------------'), True )
self.assertEqual( is_a_markdown_header( 'Not a header','- List item'), False )
def test_slugify_string(self):
self.assertEqual( slugify_string( 'String' ), 'string' )
self.assertEqual( slugify_string( 'String with spaces' ), 'string-with-spaces' )
self.assertEqual( slugify_string( '1?2<3>4?5(6)7&8"9\'10=11/12' ), '123456789101112' )
def test_prevent_latex_images_floating(self):
self.assertEqual( prevent_latex_images_floating( '' ), '\ ' )
self.assertEqual( prevent_latex_images_floating( '' ), '\ ' )
self.assertEqual( prevent_latex_images_floating( '\n' ), '\ \n' )
def test_is_an_url(self):
self.assertEqual( is_an_url( 'www.google.com' ), True )
self.assertEqual( is_an_url( 'http://www.google.com' ), True )
self.assertEqual( is_an_url( 'http:/www.google.com' ), False )
self.assertEqual( is_an_url( 'https://www.google.com' ), True )
def test_make_header_id_unique(self):
user_ids = {}
self.assertEqual( make_header_id_unique( 'header', user_ids ), 'header' )
self.assertEqual( user_ids, { 'header': 1 } )
self.assertEqual( make_header_id_unique( 'header', user_ids ), 'header-1' )
self.assertEqual( user_ids, { 'header': 2 } )
self.assertEqual( make_header_id_unique( 'header', user_ids ), 'header-2' )
self.assertEqual( user_ids, { 'header': 3 } )
self.assertEqual( make_header_id_unique( 'new-header', user_ids ), 'new-header' )
self.assertEqual( user_ids, { 'header': 3, 'new-header': 1 } )
self.assertEqual( make_header_id_unique( 'new-header', user_ids ), 'new-header-1' )
self.assertEqual( user_ids, { 'header': 3, 'new-header': 2 } )
def test_make_image_path_absolute(self):
prefix = os.path.join(os.getcwd(),'rel-dir')
# Local image
self.assertEqual(
make_image_path_absolute('link', 'rel-dir/'),
'%s/link' % prefix)
        # Remote image (URL shouldn't be changed)
self.assertEqual(
make_image_path_absolute('www.fakeurl.com', 'rel-dir/' ),
'www.fakeurl.com')
        # Remote image (URL shouldn't be changed)
self.assertEqual(
make_image_path_absolute('http://www.fakeurl.com', 'rel-dir/' ),
'http://www.fakeurl.com')
def test_process_link_destination(self):
# Simple local link (to section in same file).
self.assertEqual(
process_link_destination(
'#link',
'foo-dir/',
local_link_callback ),
'#parsed-local-link'
)
# Simple local link (to section in other file).
self.assertEqual(
process_link_destination(
'[link-text](dst_file.md/link)',
'foo-dir/',
local_link_callback ),
'#parsed-remote-link'
)
# Simple URL link (shouldn't be parsed).
self.assertEqual(
process_link_destination(
'www.google.com',
'foo-dir/',
local_link_callback ),
'www.google.com'
)
# Simple URL link (shouldn't be parsed).
self.assertEqual(
process_link_destination(
'http://www.google.com',
'foo-dir/',
local_link_callback ),
'http://www.google.com'
)
def test_have_table_separator(self):
# Simple text shouldn't be recognized as table separator.
self.assertEqual(
have_table_separator('foo'),
False
)
# Empty list item shouldn't be recognized as table separator.
self.assertEqual(
have_table_separator('- '),
False
)
# Simple table separator.
self.assertEqual(
have_table_separator('|---|'),
True
)
# Simple table separator (with spaces).
self.assertEqual(
have_table_separator('| --- |'),
True
)
# Table separator with alignment specifiers.
self.assertEqual(
have_table_separator('| :- | :-: | -: |'),
True
)
# Table row, not separator
self.assertEqual(
have_table_separator('| Method | Path | Action|'),
False
)
def test_remove_code_from_line(self):
test_cases = [
[
'Line without code should not be modified',
'Line without code should not be modified'
],
[
'This `code` and this `code` should be removed',
'This code and this code should be removed'
],
[
'Escaped \`code\` should not be removed',
'Escaped \`code\` should not be removed'
],
[
'This ```code` and this ```code``` should be removed',
'This code and this code should be removed'
],
[
'Escaped \`\`\`code\`\`\` should not be removed',
'Escaped \`\`\`code\`\`\` should not be removed'
],
]
for test_case in test_cases:
input_str = test_case[0]
expected_output = test_case[1]
self.assertEqual(remove_code_from_line(input_str), expected_output)
def test_normalize_file_extension(self):
# File with .pdf extension. Do not change it.
self.assertEqual(normalize_file_extension('dir/file.pdf'), 'dir/file.pdf')
# File with multiple extensions ending in .pdf. Do not change it.
self.assertEqual(normalize_file_extension('dir/file.txt.pdf'), 'dir/file.txt.pdf')
        # File without extension. Add .pdf extension.
self.assertEqual(normalize_file_extension('dir/file'), 'dir/file.pdf')
# File with empty extension. Add .pdf extension.
self.assertEqual(normalize_file_extension('dir/file.'), 'dir/file.pdf')
# File with extension other than pdf. Add .pdf extension.
self.assertEqual(normalize_file_extension('dir/file.txt'), 'dir/file.txt.pdf')
def test_fix_special_characters_inside_links(self):
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a\<aa'),'http://www.test.com/a\<aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a\\<aa'),'http://www.test.com/a\\<aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a<aa'),'http://www.test.com/a\<aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a\>aa'),'http://www.test.com/a\>aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a\\>aa'),'http://www.test.com/a\\>aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/a>aa'),'http://www.test.com/a\>aa')
self.assertEqual(fix_special_characters_inside_links('http://www.test.com/aaa'),'http://www.test.com/aaa')
def test_fix_blanck_spaces_before_code_tag(self):
self.assertEqual(fix_blanck_spaces_before_code_tag('\n```code'),'\n```code')
self.assertEqual(fix_blanck_spaces_before_code_tag('\n ```code'),'\n```code')
self.assertEqual(fix_blanck_spaces_before_code_tag('\n ```code'),'\n```code')
self.assertEqual(fix_blanck_spaces_before_code_tag('\n ```code'),'\n```code')
self.assertEqual(fix_blanck_spaces_before_code_tag('\n ```code'),'\n ```code')
def test_fix_html_before_title(self):
self.assertEqual(fix_html_before_title('>\n#title'),'>\n\n#title')
self.assertEqual(fix_html_before_title('>\n\n#title'),'>\n\n#title')
self.assertEqual(fix_html_before_title('>\n\n\n#title'),'>\n\n\n#title')
self.assertEqual(fix_html_before_title('>#title'),'>#title')
def test_fix_new_line_after_img(self):
self.assertEqual(fix_new_line_after_img(''),'')
self.assertEqual(fix_new_line_after_img('\n'),'\n\n')
self.assertEqual(fix_new_line_after_img('\n\n'),'\n\n')
self.assertEqual(fix_new_line_after_img('\n\n\n'),'\n\n\n')
self.assertEqual(fix_new_line_after_img('\n\n\n\n'),'\n\n\n\n')
def test_separate_latex_anchors(self):
self.assertEqual(separate_latex_anchors('\\anchor{aaaa}\n\\anchor'),'\\anchor{aaaa}\n\n\\anchor')
self.assertEqual(separate_latex_anchors('\\anchor{aaaa}\n\n\\anchor'),'\\anchor{aaaa}\n\n\\anchor')
self.assertEqual(separate_latex_anchors('\\anchor{aaaa}\n\n\n\\anchor'),'\\anchor{aaaa}\n\n\n\\anchor')
self.assertEqual(separate_latex_anchors('\\anchor{aaaa}\\anchor'),'\\anchor{aaaa}\\anchor')
def test_add_newlines_before_markdown_headers(self):
self.assertEqual(add_newlines_before_markdown_headers('text\n```code\n#title asdfs \n asdfasdf\n```'),'text\n```code\n#title asdfs \n asdfasdf\n```')
self.assertEqual(add_newlines_before_markdown_headers('text\n```code\n```\n#title asdfs \n asdfasdf'),'text\n```code\n```\n\n\n#title asdfs \n asdfasdf')
self.assertEqual(add_newlines_before_markdown_headers('text\n ```code\n#title asdfs \n asdfasdf\n```'),'text\n ```code\n#title asdfs \n asdfasdf\n```')
self.assertEqual(add_newlines_before_markdown_headers('text\n ```code\n```\n#title asdfs \n asdfasdf'),'text\n ```code\n```\n\n\n#title asdfs \n asdfasdf')
self.assertEqual(add_newlines_before_markdown_headers('text\n#title asdfs \n asdfasdf'),'text\n\n\n#title asdfs \n asdfasdf')
def test_remove_ids_from_a(self):
self.assertEqual(remove_ids_from_a('<a href=#link id="identifier">text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id= "identifier">text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id = "identifier">text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id= "identifier">text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a id="identifier" href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id= "identifier" href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id = "identifier" href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id= "identifier" href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id=\'identifier\'>text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id= \'identifier\'>text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id = \'identifier\'>text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a href=#link id= \'identifier\'>text</a>'),'<a href=#link >text</a>')
self.assertEqual(remove_ids_from_a('<a id=\'identifier\' href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id= \'identifier\' href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id = \'identifier\' href=#link>text</a>'),'<a href=#link>text</a>')
self.assertEqual(remove_ids_from_a('<a id= \'identifier\' href=#link>text</a>'),'<a href=#link>text</a>')
if __name__ == "__main__":
unittest.main()
| Fiware/tools.Md2pdf | markdown_to_pdf/test_markdown_to_pdf.py | Python | mit | 13,579 |
# -*- coding: cp1252 -*-
'''
FizzBuzz.py
* For the sequence of numbers from 1 to 100,
* print the numbers in ascending order, replacing
* multiples of 3 with Fizz, multiples of 5 with Buzz
* and multiples of both 3 and 5 with FizzBuzz
* Input: none
* Output: FizzBuzz
*
* Author: Fabrício Olivetti de França
* Course: Processamento da Informação
* Universidade Federal do ABC
'''
for i in range(1,101):
if i%15 == 0:
print "FizzBuzz"
elif i%3 == 0:
print "Fizz"
elif i%5 == 0:
print "Buzz"
else:
print i
| folivetti/PI-UFABC | AULA_03/Python/FizzBuzz.py | Python | mit | 560 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.common.cloudstack import CloudStackDriverMixIn
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.types import State
from libcloud.utils.misc import reverse_dict
class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
"""Driver for CloudStack load balancers."""
api_name = 'cloudstack_lb'
name = 'CloudStack'
website = 'http://cloudstack.org/'
type = Provider.CLOUDSTACK
_VALUE_TO_ALGORITHM_MAP = {
'roundrobin': Algorithm.ROUND_ROBIN,
'leastconn': Algorithm.LEAST_CONNECTIONS
}
_ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
LB_STATE_MAP = {
'Active': State.RUNNING,
}
def __init__(self, key, secret=None, secure=True, host=None,
path=None, port=None, *args, **kwargs):
"""
@inherits: L{Driver.__init__}
"""
host = host if host else self.host
path = path if path else self.path
if path is not None:
self.path = path
if host is not None:
self.host = host
if (self.type == Provider.CLOUDSTACK) and (not host or not path):
raise Exception('When instantiating CloudStack driver directly ' +
'you also need to provide host and path argument')
super(CloudStackLBDriver, self).__init__(key=key, secret=secret,
secure=secure,
host=host, port=port)
def list_protocols(self):
"""
We don't actually have any protocol awareness beyond TCP.
@rtype: C{list} of C{str}
"""
return ['tcp']
def list_balancers(self):
balancers = self._sync_request('listLoadBalancerRules')
balancers = balancers.get('loadbalancerrule', [])
return [self._to_balancer(balancer) for balancer in balancers]
def get_balancer(self, balancer_id):
balancer = self._sync_request('listLoadBalancerRules', id=balancer_id)
balancer = balancer.get('loadbalancerrule', [])
if not balancer:
raise Exception("no such load balancer: " + str(balancer_id))
return self._to_balancer(balancer[0])
def create_balancer(self, name, members, protocol='http', port=80,
algorithm=DEFAULT_ALGORITHM, location=None,
private_port=None):
"""
@inherits: L{Driver.create_balancer}
@param location: Location
@type location: L{NodeLocation}
@param private_port: Private port
@type private_port: C{int}
"""
if location is None:
locations = self._sync_request('listZones')
location = locations['zone'][0]['id']
else:
location = location.id
if private_port is None:
private_port = port
result = self._async_request('associateIpAddress', zoneid=location)
public_ip = result['ipaddress']
result = self._sync_request(
'createLoadBalancerRule',
algorithm=self._ALGORITHM_TO_VALUE_MAP[algorithm],
name=name,
privateport=private_port,
publicport=port,
publicipid=public_ip['id'],
)
balancer = self._to_balancer(result['loadbalancer'])
for member in members:
balancer.attach_member(member)
return balancer
def destroy_balancer(self, balancer):
self._async_request('deleteLoadBalancerRule', id=balancer.id)
self._async_request('disassociateIpAddress',
id=balancer.ex_public_ip_id)
def balancer_attach_member(self, balancer, member):
member.port = balancer.ex_private_port
self._async_request('assignToLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_detach_member(self, balancer, member):
self._async_request('removeFromLoadBalancerRule', id=balancer.id,
virtualmachineids=member.id)
return True
def balancer_list_members(self, balancer):
members = self._sync_request('listLoadBalancerRuleInstances',
id=balancer.id)
members = members['loadbalancerruleinstance']
return [self._to_member(m, balancer.ex_private_port, balancer) \
for m in members]
def _to_balancer(self, obj):
balancer = LoadBalancer(
id=obj['id'],
name=obj['name'],
state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
ip=obj['publicip'],
port=obj['publicport'],
driver=self.connection.driver
)
balancer.ex_private_port = obj['privateport']
balancer.ex_public_ip_id = obj['publicipid']
return balancer
def _to_member(self, obj, port, balancer):
return Member(
id=obj['id'],
ip=obj['nic'][0]['ipaddress'],
port=port,
balancer=balancer
)
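# Illustrative usage sketch (not part of the original driver): the endpoint and
# credentials below are placeholders for a real CloudStack deployment; the loop
# only exercises methods defined above.
if __name__ == '__main__':
    driver = CloudStackLBDriver('api-key', secret='api-secret',
                                host='cloudstack.example.com',
                                path='/client/api')
    for balancer in driver.list_balancers():
        print('%s: %s (%s)' % (balancer.id, balancer.name, balancer.state))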
| IsCoolEntertainment/debpkg_libcloud | libcloud/loadbalancer/drivers/cloudstack.py | Python | apache-2.0 | 6,048 |
from django.shortcuts import render
from django.shortcuts import resolve_url
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView
from django.views.generic.detail import SingleObjectMixin
from .forms import MyForm
from .forms import VoteForm
from .models import Question
def index(request):
return render(request, 'polls/index.html', {
'questions': Question.objects.all(),
})
class FormTest(FormView):
form_class = MyForm
template_name = 'polls/form.html'
success_url = reverse_lazy('polls:index')
form_test = FormTest.as_view()
class Detail(SingleObjectMixin, FormView):
model = Question
form_class = VoteForm
context_object_name = 'question'
template_name = 'polls/detail.html'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['question'] = self.object
return kwargs
def form_valid(self, form):
form.vote()
choice = form.cleaned_data['choice']
        messages.success(self.request, 'Voted for "%s"' % choice)
return super().form_valid(form)
def get_success_url(self):
return resolve_url('polls:results', self.kwargs['pk'])
detail = Detail.as_view()
def results(request, pk):
obj = get_object_or_404(Question, pk=pk)
return render(request, 'polls/results.html', {
'question': obj,
})
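# Illustrative wiring sketch (not part of this module): a hypothetical
# polls/urls.py routing the views above, with URL names matching the reverse
# lookups used here ('polls:index', 'polls:results'):
#
#     from django.conf.urls import url
#     from . import views
#
#     app_name = 'polls'
#     urlpatterns = [
#         url(r'^$', views.index, name='index'),
#         url(r'^form/$', views.form_test, name='form_test'),
#         url(r'^(?P<pk>\d+)/$', views.detail, name='detail'),
#         url(r'^(?P<pk>\d+)/results/$', views.results, name='results'),
#     ]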
| usa-mimi/tutorial | tutorial/polls/views.py | Python | mit | 1,765 |
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import EMEData
from nbxmpp.modules.base import BaseModule
class EME(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_eme,
ns=Namespace.EME,
priority=40)
]
def _process_eme(self, _client, stanza, properties):
encryption = stanza.getTag('encryption', namespace=Namespace.EME)
if encryption is None:
return
name = encryption.getAttr('name')
namespace = encryption.getAttr('namespace')
if namespace is None:
self._log.warning('No namespace on message')
return
properties.eme = EMEData(name=name, namespace=namespace)
self._log.info('Found data: %s', properties.eme)
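# For reference, the handler above matches a message child element of roughly
# this shape (attribute values are illustrative; Namespace.EME supplies the
# actual xmlns):
#
#   <encryption xmlns='urn:xmpp:eme:0'
#               namespace='eu.siacs.conversations.axolotl'
#               name='OMEMO'/>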
| gajim/python-nbxmpp | nbxmpp/modules/eme.py | Python | gpl-3.0 | 1,740 |
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Chromium Mac implementation of the Port interface."""
import os
import platform
import signal
import subprocess
import chromium
class ChromiumMacPort(chromium.ChromiumPort):
"""Chromium Mac implementation of the Port class."""
def __init__(self, port_name=None, options=None):
if port_name is None:
port_name = 'chromium-mac'
chromium.ChromiumPort.__init__(self, port_name, options)
def baseline_search_path(self):
return [self.baseline_path(),
self._webkit_baseline_path('mac' + self.version()),
self._webkit_baseline_path('mac')]
def check_sys_deps(self):
# We have no specific platform dependencies.
return True
def num_cores(self):
return int(subprocess.Popen(['sysctl','-n','hw.ncpu'],
stdout=subprocess.PIPE).stdout.read())
def test_platform_name(self):
# We use 'mac' instead of 'chromium-mac'
return 'mac'
def version(self):
os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
if not os_version_string:
return '-leopard'
release_version = int(os_version_string.split('.')[1])
# we don't support 'tiger' or earlier releases
if release_version == 5:
return '-leopard'
elif release_version == 6:
return '-snowleopard'
return ''
#
# PROTECTED METHODS
#
def _build_path(self, *comps):
return self.path_from_chromium_base('xcodebuild', self._options.target,
*comps)
def _lighttpd_path(self, *comps):
return self.path_from_chromium_base('third_party', 'lighttpd',
'mac', *comps)
def _kill_process(self, pid):
"""Forcefully kill the process.
Args:
pid: The id of the process to be killed.
"""
os.kill(pid, signal.SIGKILL)
def _kill_all_process(self, process_name):
"""Kill any processes running under this name."""
# On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
# -SIGNALNUMBER must come first. Example problem:
# $ killall -u $USER -TERM lighttpd
# killall: illegal option -- T
# Use of the earlier -TERM placement is just fine on 10.5.
null = open(os.devnull)
subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
process_name], stderr=null)
null.close()
def _path_to_apache(self):
return '/usr/sbin/httpd'
def _path_to_apache_config_file(self):
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-httpd.conf')
    def _path_to_lighttpd(self):
        return self._lighttpd_path('bin', 'lighttpd')
    def _path_to_lighttpd_modules(self):
        return self._lighttpd_path('lib')
def _path_to_lighttpd_php(self):
return self._lighttpd_path('bin', 'php-cgi')
def _path_to_driver(self):
# TODO(pinkerton): make |target| happy with case-sensitive file
# systems.
return self._build_path('TestShell.app', 'Contents', 'MacOS',
'TestShell')
def _path_to_helper(self):
return self._build_path('layout_test_helper')
def _path_to_image_diff(self):
return self._build_path('image_diff')
def _path_to_wdiff(self):
return 'wdiff'
def _shut_down_http_server(self, server_pid):
"""Shut down the lighttpd web server. Blocks until it's fully
shut down.
Args:
server_pid: The process ID of the running server.
"""
# server_pid is not set when "http_server.py stop" is run manually.
if server_pid is None:
# TODO(mmoss) This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
self._kill_all_process('lighttpd')
self._kill_all_process('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
# TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
except OSError:
# Sometimes we get a bad PID (e.g. from a stale httpd.pid
# file), so if kill fails on the given PID, just try to
# 'killall' web servers.
self._shut_down_http_server(None)
|
EpicCM/SPH-D700-Kernel
|
external/webkit/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
|
Python
|
gpl-2.0
| 6,074
|
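The version() method above keys the layout-test baseline suffix off the Darwin minor release reported by platform.mac_ver(). A stand-alone sketch of that mapping, with a hypothetical helper name outside the Port class:
import platform
def mac_baseline_suffix(os_version_string=None):
    # Hypothetical stand-alone version of ChromiumMacPort.version():
    # 10.5.x -> '-leopard', 10.6.x -> '-snowleopard', anything newer -> ''.
    if os_version_string is None:
        os_version_string = platform.mac_ver()[0]  # e.g. "10.5.6"; empty off macOS
    if not os_version_string:
        return '-leopard'
    release_version = int(os_version_string.split('.')[1])
    if release_version == 5:
        return '-leopard'
    elif release_version == 6:
        return '-snowleopard'
    return ''
print(mac_baseline_suffix("10.6.8"))  # -snowleopard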
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('head', '0003_remove_doner_contact_number'),
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=500)),
('color', models.CharField(max_length=10)),
],
),
migrations.AddField(
model_name='contactnumber',
name='source',
field=models.ForeignKey(default=None, to='head.Organization'),
preserve_default=False,
),
]
|
ayys/bloodData
|
head/migrations/0004_auto_20150417_1248.py
|
Python
|
gpl-3.0
| 820
|
"""Interface for all the algorithms in MSAF."""
import numpy as np
import msaf.utils as U
class SegmenterInterface:
"""This class is an interface for all the segmenter algorithms included
in MSAF. These segmenters must inherit from it and implement one of the
following methods:
processFlat()
processHierarchical()
Additionally, two private helper functions are provided:
- preprocess
- postprocess
These are meant to do common tasks for all the segmenters and they
should be called inside the process method if needed.
    All segmenters must return estimated times for the boundaries (est_times),
and estimated labels (est_labels), **even if they can't compute them**.
    The three types of algorithms and their behaviors are:
- Computes boundaries and labels:
If in_bound_times is None:
Compute the est_times
Else:
Do not compute est_times, simply use in_bound_times instead
If in_labels is None:
Compute the est_labels
Else:
Do not compute est_labels, simply use in_labels instead
- Computes boundaries only:
Compute boundaries and return est_labels as None.
- Computes labels only:
Use in_bound_times in order to compute the labels.
Return est_times as in_bound_times and the computed labels.
In these cases, est_times or est_labels will be empty (None).
"""
def __init__(self, file_struct, in_bound_idxs=None, feature="pcp",
annot_beats=False, framesync=False, features=None, **config):
"""Inits the Segmenter.
Parameters
----------
file_struct: `msaf.io.FileStruct`
Object with the file paths.
in_bound_idxs: np.array
            Array containing the frame indices of the previously found
            boundaries. `None` for computing them.
feature: str
Identifier of the features (e.g., pcp, mfcc)
annot_beats: boolean
Whether to use annotated beats or estimated ones.
framesync: boolean
Whether to use frame-synchronous or beat-synchronous features.
features: dict
Previously computed features. `None` for reading them.
config: dict
Configuration for the given algorithm (see module's __config.py__).
"""
self.file_struct = file_struct
self.audio_file = file_struct.audio_file
self.in_bound_idxs = in_bound_idxs
self.feature_str = feature
self.annot_beats = annot_beats
self.framesync = framesync
self.config = config
self.features = features
def processFlat(self):
"""Main process to obtain the flat segmentation of a given track."""
raise NotImplementedError("This method does not return flat "
"segmentations.")
def processHierarchical(self):
"""Main process to obtian the hierarchical segmentation of a given
track."""
raise NotImplementedError("This method does not return hierarchical "
"segmentations.")
def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc",
"cqt", "tempogram"]):
"""This method obtains the actual features."""
# Use specific feature
if self.feature_str not in valid_features:
raise RuntimeError("Feature %s in not valid for algorithm: %s "
"(valid features are %s)." %
(self.feature_str, __name__, valid_features))
else:
try:
F = self.features.features
except KeyError:
raise RuntimeError("Feature %s in not supported by MSAF" %
(self.feature_str))
return F
def _postprocess(self, est_idxs, est_labels):
"""Post processes the estimations from the algorithm, removing empty
segments and making sure the lenghts of the boundaries and labels
match."""
# Make sure we are using the previously input bounds, if any
if self.in_bound_idxs is not None:
F = self._preprocess()
est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs,
est_labels, F.shape[0])
est_idxs = self.in_bound_idxs
# Remove empty segments if needed
est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)
assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \
"(%d) and number of labels(%d) don't match" % (len(est_idxs),
len(est_labels))
        # Make sure the indices are integers
est_idxs = np.asarray(est_idxs, dtype=int)
return est_idxs, est_labels
|
urinieto/msaf
|
msaf/algorithms/interface.py
|
Python
|
mit
| 5,024
|
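A minimal sketch of a concrete segmenter built on the interface above; the class name and the fixed-grid boundary logic are invented for illustration, and only SegmenterInterface and its helpers come from MSAF:
import numpy as np
from msaf.algorithms.interface import SegmenterInterface
class FixedGridSegmenter(SegmenterInterface):
    """Hypothetical segmenter: a boundary every 100 frames, one dummy label per segment."""
    def processFlat(self):
        F = self._preprocess()                    # feature matrix, frames x bins
        est_idxs = np.arange(0, F.shape[0], 100)  # naive fixed-grid boundaries
        est_idxs = np.unique(np.append(est_idxs, F.shape[0] - 1))
        est_labels = np.zeros(len(est_idxs) - 1)  # one placeholder label per segment
        return self._postprocess(est_idxs, est_labels)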
# -*- coding: utf-8 -*-
from __future__ import print_function
|
kentfrazier/Exhibitionist
|
exhibitionist/util/__init__.py
|
Python
|
bsd-3-clause
| 62
|
from django.contrib import admin
from .models import Hub
admin.site.register(Hub)
|
iver56/useat-api
|
hub/admin.py
|
Python
|
mit
| 83
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import six.moves.urllib.parse as urlparse
from keystoneclient import exceptions as keystone_exceptions
from nikola_auth import backend
from nikola_auth import utils as auth_utils
from horizon import exceptions
from horizon import messages
from horizon.utils import functions as utils
from openstack_dashboard.api import base
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
# Set up our data structure for managing Identity API versions, and
# add a couple utility methods to it.
class IdentityAPIVersionManager(base.APIVersionManager):
def upgrade_v2_user(self, user):
if getattr(user, "project_id", None) is None:
user.project_id = getattr(user, "default_project_id",
getattr(user, "tenantId", None))
return user
def get_project_manager(self, *args, **kwargs):
if VERSIONS.active < 3:
manager = keystoneclient(*args, **kwargs).tenants
else:
manager = keystoneclient(*args, **kwargs).projects
return manager
VERSIONS = IdentityAPIVersionManager(
"identity", preferred_version=auth_utils.get_keystone_version())
# Import from oldest to newest so that "preferred" takes correct precedence.
try:
from keystoneclient.v2_0 import client as keystone_client_v2
VERSIONS.load_supported_version(2.0, {"client": keystone_client_v2})
except ImportError:
pass
try:
from keystoneclient.v3 import client as keystone_client_v3
VERSIONS.load_supported_version(3, {"client": keystone_client_v3})
except ImportError:
pass
class Service(base.APIDictWrapper):
"""Wrapper for a dict based on the service data from keystone."""
_attrs = ['id', 'type', 'name']
def __init__(self, service, region, *args, **kwargs):
super(Service, self).__init__(service, *args, **kwargs)
self.public_url = base.get_url_for_service(service, region,
'publicURL')
self.url = base.get_url_for_service(service, region, 'internalURL')
if self.url:
self.host = urlparse.urlparse(self.url).hostname
else:
self.host = None
self.disabled = None
self.region = region
def __unicode__(self):
if(self.type == "identity"):
return _("%(type)s (%(backend)s backend)") \
% {"type": self.type, "backend": keystone_backend_name()}
else:
return self.type
def __repr__(self):
return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
if getattr(request.user, "service_catalog", None):
url = base.url_for(request,
service_type='identity',
endpoint_type=endpoint_type)
else:
auth_url = getattr(settings, 'OPENSTACK_KEYSTONE_URL')
url = request.session.get('region_endpoint', auth_url)
# TODO(gabriel): When the Service Catalog no longer contains API versions
# in the endpoints this can be removed.
url = url.rstrip('/')
url = urlparse.urljoin(url, 'v%s' % VERSIONS.active)
return url
def keystoneclient(request, admin=False):
"""Returns a client connected to the Keystone backend.
Several forms of authentication are supported:
* Username + password -> Unscoped authentication
* Username + password + tenant id -> Scoped authentication
* Unscoped token -> Unscoped authentication
* Unscoped token + tenant id -> Scoped authentication
* Scoped token -> Scoped authentication
Available services and data from the backend will vary depending on
whether the authentication was scoped or unscoped.
Lazy authentication if an ``endpoint`` parameter is provided.
Calls requiring the admin endpoint should have ``admin=True`` passed in
as a keyword argument.
The client is cached so that subsequent API calls during the same
request/response cycle don't have to be re-authenticated.
"""
user = request.user
if admin:
if not policy.check((("identity", "admin_required"),), request):
raise exceptions.NotAuthorized
endpoint_type = 'adminURL'
else:
endpoint_type = getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'internalURL')
api_version = VERSIONS.get_active_version()
# Take care of client connection caching/fetching a new client.
# Admin vs. non-admin clients are cached separately for token matching.
cache_attr = "_keystoneclient_admin" if admin \
else backend.KEYSTONE_CLIENT_ATTR
if (hasattr(request, cache_attr) and
(not user.token.id or
getattr(request, cache_attr).auth_token == user.token.id)):
conn = getattr(request, cache_attr)
else:
endpoint = _get_endpoint_url(request, endpoint_type)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug("Creating a new keystoneclient connection to %s." % endpoint)
remote_addr = request.environ.get('REMOTE_ADDR', '')
conn = api_version['client'].Client(token=user.token.id,
endpoint=endpoint,
original_ip=remote_addr,
insecure=insecure,
cacert=cacert,
auth_url=endpoint,
debug=settings.DEBUG)
setattr(request, cache_attr, conn)
return conn
def domain_create(request, name, description=None, enabled=None):
manager = keystoneclient(request, admin=True).domains
return manager.create(name,
description=description,
enabled=enabled)
def domain_get(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.get(domain_id)
def domain_delete(request, domain_id):
manager = keystoneclient(request, admin=True).domains
return manager.delete(domain_id)
def domain_list(request):
manager = keystoneclient(request, admin=True).domains
return manager.list()
def domain_update(request, domain_id, name=None, description=None,
enabled=None):
manager = keystoneclient(request, admin=True).domains
return manager.update(domain_id, name, description, enabled)
def tenant_create(request, name, description=None, enabled=None,
domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
if VERSIONS.active < 3:
return manager.create(name, description, enabled, **kwargs)
else:
return manager.create(name, domain,
description=description,
enabled=enabled, **kwargs)
def get_default_domain(request):
"""Gets the default domain object to use when creating Identity object.
Returns the domain context if is set, otherwise return the domain
of the logon user.
"""
domain_id = request.session.get("domain_context", None)
domain_name = request.session.get("domain_context_name", None)
# if running in Keystone V3 or later
if VERSIONS.active >= 3 and not domain_id:
# if no domain context set, default to users' domain
domain_id = request.user.user_domain_id
try:
domain = domain_get(request, domain_id)
domain_name = domain.name
except Exception:
LOG.warning("Unable to retrieve Domain: %s" % domain_id)
domain = base.APIDictWrapper({"id": domain_id,
"name": domain_name})
return domain
# TODO(gabriel): Is there ever a valid case for admin to be false here?
# A quick search through the codebase reveals that it's always called with
# admin=true so I suspect we could eliminate it entirely as with the other
# tenant commands.
def tenant_get(request, project, admin=True):
manager = VERSIONS.get_project_manager(request, admin=admin)
return manager.get(project)
def tenant_delete(request, project):
manager = VERSIONS.get_project_manager(request, admin=True)
return manager.delete(project)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
admin=True):
manager = VERSIONS.get_project_manager(request, admin=admin)
page_size = utils.get_page_size(request)
limit = None
if paginate:
limit = page_size + 1
has_more_data = False
if VERSIONS.active < 3:
tenants = manager.list(limit, marker)
if paginate and len(tenants) > page_size:
tenants.pop(-1)
has_more_data = True
else:
tenants = manager.list(domain=domain, user=user)
return (tenants, has_more_data)
def tenant_update(request, project, name=None, description=None,
enabled=None, domain=None, **kwargs):
manager = VERSIONS.get_project_manager(request, admin=True)
if VERSIONS.active < 3:
return manager.update(project, name, description, enabled, **kwargs)
else:
return manager.update(project, name=name, description=description,
enabled=enabled, domain=domain, **kwargs)
def user_list(request, project=None, domain=None, group=None):
if VERSIONS.active < 3:
kwargs = {"tenant_id": project}
else:
kwargs = {
"project": project,
"domain": domain,
"group": group
}
users = keystoneclient(request, admin=True).users.list(**kwargs)
return [VERSIONS.upgrade_v2_user(user) for user in users]
def user_create(request, name=None, email=None, password=None, project=None,
enabled=None, domain=None):
manager = keystoneclient(request, admin=True).users
try:
if VERSIONS.active < 3:
user = manager.create(name, password, email, project, enabled)
return VERSIONS.upgrade_v2_user(user)
else:
return manager.create(name, password=password, email=email,
project=project, enabled=enabled,
domain=domain)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
def user_delete(request, user_id):
return keystoneclient(request, admin=True).users.delete(user_id)
def user_get(request, user_id, admin=True):
user = keystoneclient(request, admin=admin).users.get(user_id)
return VERSIONS.upgrade_v2_user(user)
def user_update(request, user, **data):
manager = keystoneclient(request, admin=True).users
error = None
if not keystone_can_edit_user():
raise keystone_exceptions.ClientException(
405, _("Identity service does not allow editing user data."))
# The v2 API updates user model, password and default project separately
if VERSIONS.active < 3:
password = data.pop('password')
project = data.pop('project')
# Update user details
try:
user = manager.update(user, **data)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
except Exception:
error = exceptions.handle(request, ignore=True)
# Update default tenant
try:
user_update_tenant(request, user, project)
user.tenantId = project
except Exception:
error = exceptions.handle(request, ignore=True)
# Check for existing roles
# Show a warning if no role exists for the project
user_roles = roles_for_user(request, user, project)
if not user_roles:
messages.warning(request,
_('User %s has no role defined for '
'that project.')
% data.get('name', None))
# If present, update password
# FIXME(gabriel): password change should be its own form + view
if password:
try:
user_update_password(request, user, password)
if user.id == request.user.id:
return utils.logout_with_message(
request,
_("Password changed. Please log in again to "
"continue."),
redirect=False
)
except Exception:
error = exceptions.handle(request, ignore=True)
if error is not None:
raise error
# v3 API is so much simpler...
else:
if not data['password']:
data.pop('password')
try:
user = manager.update(user, **data)
except keystone_exceptions.Conflict:
raise exceptions.Conflict()
if data.get('password') and user.id == request.user.id:
return utils.logout_with_message(
request,
_("Password changed. Please log in again to continue."),
redirect=False
)
def user_update_enabled(request, user, enabled):
manager = keystoneclient(request, admin=True).users
if VERSIONS.active < 3:
return manager.update_enabled(user, enabled)
else:
return manager.update(user, enabled=enabled)
def user_update_password(request, user, password, admin=True):
manager = keystoneclient(request, admin=admin).users
if VERSIONS.active < 3:
return manager.update_password(user, password)
else:
return manager.update(user, password=password)
def user_update_own_password(request, origpassword, password):
client = keystoneclient(request, admin=False)
client.user_id = request.user.id
if VERSIONS.active < 3:
return client.users.update_own_password(origpassword, password)
else:
return client.users.update_password(origpassword, password)
def user_update_tenant(request, user, project, admin=True):
manager = keystoneclient(request, admin=admin).users
if VERSIONS.active < 3:
return manager.update_tenant(user, project)
else:
return manager.update(user, project=project)
def group_create(request, domain_id, name, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.create(domain=domain_id,
name=name,
description=description)
def group_get(request, group_id, admin=True):
manager = keystoneclient(request, admin=admin).groups
return manager.get(group_id)
def group_delete(request, group_id):
manager = keystoneclient(request, admin=True).groups
return manager.delete(group_id)
def group_list(request, domain=None, project=None, user=None):
manager = keystoneclient(request, admin=True).groups
groups = manager.list(user=user, domain=domain)
if project:
project_groups = []
for group in groups:
roles = roles_for_group(request, group=group.id, project=project)
if roles and len(roles) > 0:
project_groups.append(group)
groups = project_groups
return groups
def group_update(request, group_id, name=None, description=None):
manager = keystoneclient(request, admin=True).groups
return manager.update(group=group_id,
name=name,
description=description)
def add_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.add_to_group(group=group_id, user=user_id)
def remove_group_user(request, group_id, user_id):
manager = keystoneclient(request, admin=True).users
return manager.remove_from_group(group=group_id, user=user_id)
def get_project_groups_roles(request, project):
"""Gets the groups roles in a given project.
:param request: the request entity containing the login user information
:param project: the project to filter the groups roles. It accepts both
project object resource or project ID
    :returns groups_roles: a dictionary mapping groups to their roles in the
given project
"""
groups_roles = collections.defaultdict(list)
project_role_assignments = role_assignments_list(request,
project=project)
for role_assignment in project_role_assignments:
if not hasattr(role_assignment, 'group'):
continue
group_id = role_assignment.group['id']
role_id = role_assignment.role['id']
groups_roles[group_id].append(role_id)
return groups_roles
def role_assignments_list(request, project=None, user=None, role=None,
group=None, domain=None, effective=False):
if VERSIONS.active < 3:
raise exceptions.NotAvailable
manager = keystoneclient(request, admin=True).role_assignments
return manager.list(project=project, user=user, role=role, group=group,
domain=domain, effective=effective)
def role_create(request, name):
manager = keystoneclient(request, admin=True).roles
return manager.create(name)
def role_get(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.get(role_id)
def role_update(request, role_id, name=None):
manager = keystoneclient(request, admin=True).roles
return manager.update(role_id, name)
def role_delete(request, role_id):
manager = keystoneclient(request, admin=True).roles
return manager.delete(role_id)
def role_list(request):
"""Returns a global list of available roles."""
return keystoneclient(request, admin=True).roles.list()
def roles_for_user(request, user, project=None, domain=None):
"""Returns a list of user roles scoped to a project or domain."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.roles_for_user(user, project)
else:
return manager.list(user=user, domain=domain, project=project)
def get_domain_users_roles(request, domain):
users_roles = collections.defaultdict(list)
domain_role_assignments = role_assignments_list(request,
domain=domain)
for role_assignment in domain_role_assignments:
if not hasattr(role_assignment, 'user'):
continue
user_id = role_assignment.user['id']
role_id = role_assignment.role['id']
users_roles[user_id].append(role_id)
return users_roles
def add_domain_user_role(request, user, role, domain):
"""Adds a role for a user on a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.grant(role, user=user, domain=domain)
def remove_domain_user_role(request, user, role, domain=None):
"""Removes a given single role for a user from a domain."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role, user=user, domain=domain)
def get_project_users_roles(request, project):
users_roles = collections.defaultdict(list)
if VERSIONS.active < 3:
project_users = user_list(request, project=project)
for user in project_users:
roles = roles_for_user(request, user.id, project)
roles_ids = [role.id for role in roles]
users_roles[user.id].extend(roles_ids)
else:
project_role_assignments = role_assignments_list(request,
project=project)
for role_assignment in project_role_assignments:
if not hasattr(role_assignment, 'user'):
continue
user_id = role_assignment.user['id']
role_id = role_assignment.role['id']
users_roles[user_id].append(role_id)
return users_roles
def add_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Adds a role for a user on a tenant."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.add_user_role(user, role, project)
else:
return manager.grant(role, user=user, project=project,
group=group, domain=domain)
def remove_tenant_user_role(request, project=None, user=None, role=None,
group=None, domain=None):
"""Removes a given single role for a user from a tenant."""
manager = keystoneclient(request, admin=True).roles
if VERSIONS.active < 3:
return manager.remove_user_role(user, role, project)
else:
return manager.revoke(role, user=user, project=project,
group=group, domain=domain)
def remove_tenant_user(request, project=None, user=None, domain=None):
"""Removes all roles from a user on a tenant, removing them from it."""
client = keystoneclient(request, admin=True)
roles = client.roles.roles_for_user(user, project)
for role in roles:
remove_tenant_user_role(request, user=user, role=role.id,
project=project, domain=domain)
def roles_for_group(request, group, domain=None, project=None):
manager = keystoneclient(request, admin=True).roles
return manager.list(group=group, domain=domain, project=project)
def add_group_role(request, role, group, domain=None, project=None):
"""Adds a role for a group on a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.grant(role=role, group=group, domain=domain,
project=project)
def remove_group_role(request, role, group, domain=None, project=None):
"""Removes a given single role for a group from a domain or project."""
manager = keystoneclient(request, admin=True).roles
return manager.revoke(role=role, group=group, project=project,
domain=domain)
def remove_group_roles(request, group, domain=None, project=None):
"""Removes all roles from a group on a domain or project."""
client = keystoneclient(request, admin=True)
roles = client.roles.list(group=group, domain=domain, project=project)
for role in roles:
remove_group_role(request, role=role.id, group=group,
domain=domain, project=project)
def get_default_role(request):
"""Gets the default role object from Keystone and saves it as a global.
    Since this is configured in settings and should not change from request
    to request, it is cached as a global. Supports lookup by name or id.
"""
global DEFAULT_ROLE
default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
if default and DEFAULT_ROLE is None:
try:
roles = keystoneclient(request, admin=True).roles.list()
except Exception:
roles = []
exceptions.handle(request)
for role in roles:
if role.id == default or role.name == default:
DEFAULT_ROLE = role
break
return DEFAULT_ROLE
def ec2_manager(request):
client = keystoneclient(request)
if hasattr(client, 'ec2'):
return client.ec2
# Keystoneclient 4.0 was released without the ec2 creds manager.
from keystoneclient.v2_0 import ec2
return ec2.CredentialsManager(client)
def list_ec2_credentials(request, user_id):
return ec2_manager(request).list(user_id)
def create_ec2_credentials(request, user_id, tenant_id):
return ec2_manager(request).create(user_id, tenant_id)
def get_user_ec2_credentials(request, user_id, access_token):
return ec2_manager(request).get(user_id, access_token)
def keystone_can_edit_domain():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
can_edit_domain = backend_settings.get('can_edit_domain', True)
multi_domain_support = getattr(settings,
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
False)
return can_edit_domain and multi_domain_support
def keystone_can_edit_user():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_user', True)
def keystone_can_edit_project():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_project', True)
def keystone_can_edit_group():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_group', True)
def keystone_can_edit_role():
backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
return backend_settings.get('can_edit_role', True)
def keystone_backend_name():
if hasattr(settings, "OPENSTACK_KEYSTONE_BACKEND"):
return settings.OPENSTACK_KEYSTONE_BACKEND['name']
else:
return 'unknown'
|
AlexOugh/horizon
|
openstack_dashboard/api/keystone.py
|
Python
|
apache-2.0
| 26,092
|
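As a usage sketch, the pagination contract of tenant_list() above (a (tenants, has_more) tuple) might be consumed from calling code roughly like this; the helper and its marker handling are illustrative and not part of the module:
from openstack_dashboard.api import keystone
def list_projects(request, marker=None):
    # tenant_list() already trims the extra probe row and reports whether more
    # data exists, so the caller only has to thread the marker through.
    projects, has_more = keystone.tenant_list(request, paginate=True, marker=marker)
    next_marker = projects[-1].id if (has_more and projects) else None
    return projects, next_marker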
# Note the JavaScript -> Python syntax differences:
# document.getElementById -> doc[]
# module Math -> math
# Math.PI -> math.pi
# abs -> fabs
# arrays can be replaced by lists
import math
import time
from browser import doc
import browser.timer
# Point class
class Point(object):
    # Constructor
def __init__(self, x, y):
self.x = x
self.y = y
    # Drawing method
def drawMe(self, g, r):
self.g = g
self.r = r
self.g.save()
self.g.moveTo(self.x,self.y)
self.g.beginPath()
        # Draw a circle of radius r to mark the point's position
        self.g.arc(self.x, self.y, self.r, 0, 2*math.pi, True)
self.g.moveTo(self.x,self.y)
self.g.lineTo(self.x+self.r, self.y)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x-self.r, self.y)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x, self.y+self.r)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x, self.y-self.r)
self.g.restore()
self.g.stroke()
    # Add the Eq method
def Eq(self, pt):
self.x = pt.x
self.y = pt.y
    # Add the setPoint method
def setPoint(self, px, py):
self.x = px
self.y = py
    # Add the distance(pt) method, computing the distance from this point to pt
def distance(self, pt):
self.pt = pt
x = self.x - self.pt.x
y = self.y - self.pt.y
return math.sqrt(x * x + y * y)
    # Label the point's coordinates with text
def tag(self, g):
self.g = g
self.g.beginPath()
self.g.fillText("%d, %d"%(self.x, self.y),self.x, self.y)
self.g.stroke()
# Line class
class Line(object):
    # Constructor
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
        # The first point of the line is the tail
self.Tail = self.p1
        # The second point of the line is the head
self.Head = self.p2
        # Length attribute of the line
self.length = math.sqrt(math.pow(self.p2.x-self.p1.x, 2)+math.pow(self.p2.y-self.p1.y,2))
    # setPP defines the line from given tail and head points
def setPP(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.Tail = self.p1
self.Head = self.p2
self.length = math.sqrt(math.pow(self.p2.x-self.p1.x, 2)+math.pow(self.p2.y-self.p1.y,2))
    # setRT method for Line: the Tail point should already be set; r and t then locate the Head
def setRT(self, r, t):
self.r = r
self.t = t
x = self.r * math.cos(self.t)
y = self.r * math.sin(self.t)
self.Tail.Eq(self.p1)
self.Head.setPoint(self.Tail.x + x,self.Tail.y + y)
    # getR method for Line
def getR(self):
        # x and y components
x = self.p1.x - self.p2.x
y = self.p1.y - self.p2.y
return math.sqrt(x * x + y * y)
    # By definition atan2(y, x) is the angle between (x, y) and the positive x axis, between -pi and pi
def getT(self):
x = self.p2.x - self.p1.x
y = self.p2.y - self.p1.y
if (math.fabs(x) < math.pow(10,-100)):
if(y < 0.0):
return (-math.pi/2)
else:
return (math.pi/2)
else:
return math.atan2(y, x)
    # setTail method for Line
    def setTail(self, pt):
        # Move the tail to pt while preserving the line's direction vector
        dx = self.Head.x - self.Tail.x
        dy = self.Head.y - self.Tail.y
        self.pt = pt
        self.Tail.Eq(pt)
        self.Head.setPoint(pt.x + dx, pt.y + dy)
    # getHead method for Line
def getHead(self):
return self.Head
def getTail(self):
return self.Tail
def drawMe(self, g):
self.g = g
self.g.beginPath()
self.g.moveTo(self.p1.x,self.p1.y)
self.g.lineTo(self.p2.x,self.p2.y)
self.g.stroke()
def test(self):
return ("this is pure test to Inherit")
class Link(Line):
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.length = math.sqrt(math.pow((self.p2.x - self.p1.x), 2) + math.pow((self.p2.y - self.p1.y), 2))
#g context
def drawMe(self, g):
self.g = g
hole = 5
radius = 10
length = self.getR()
# alert(length)
        # Save the current drawing state
self.g.save()
self.g.translate(self.p1.x,self.p1.y)
#alert(str(self.p1.x)+","+str(self.p1.y))
#self.g.rotate(-((math.pi/2)-self.getT()))
self.g.rotate(-math.pi*0.5 + self.getT())
#alert(str(self.getT()))
#self.g.rotate(10*math.pi/180)
#this.g.rotate(-(Math.PI/2-this.getT()));
        # A coordinate transform is needed because the Link is drawn along the y axis; it could instead be drawn along the x axis...
self.g.beginPath()
self.g.moveTo(0,0)
        self.g.arc(0, 0, hole, 0, 2*math.pi, True)
self.g.stroke()
self.g.moveTo(0,length)
self.g.beginPath()
        self.g.arc(0,length, hole, 0, 2*math.pi, True)
self.g.stroke()
self.g.moveTo(0,0)
self.g.beginPath()
        self.g.arc(0,0, radius, 0, math.pi, True)
self.g.moveTo(0+radius,0)
self.g.lineTo(0+radius,0+length)
self.g.stroke()
self.g.moveTo(0,0+length)
self.g.beginPath()
        self.g.arc(0, 0+length, radius, math.pi, 0, True)
self.g.moveTo(0-radius,0+length)
self.g.lineTo(0-radius,0)
self.g.stroke()
self.g.restore()
self.g.beginPath()
self.g.fillStyle = "red"
self.g.font = "bold 18px sans-serif"
self.g.fillText("%d, %d"%(self.p2.x, self.p2.y),self.p2.x, self.p2.y)
self.g.stroke()
self.g.restore()
class Triangle(object):
def __init__(self, p1, p2, p3):
self.p1 = p1
self.p2 = p2
self.p3 = p3
def getLenp3(self):
p1 = self.p1
ret = p1.distance(self.p2)
return ret
def getLenp1(self):
p2 = self.p2
ret = p2.distance(self.p3)
return ret
def getLenp2(self):
p1 = self.p1
ret = p1.distance(self.p3)
return ret
    # Angles
def getAp1(self):
ret = math.acos(((self.getLenp2() * self.getLenp2() + self.getLenp3() * self.getLenp3()) - self.getLenp1() * self.getLenp1()) / (2* self.getLenp2() * self.getLenp3()))
return ret
#
def getAp2(self):
ret =math.acos(((self.getLenp1() * self.getLenp1() + self.getLenp3() * self.getLenp3()) - self.getLenp2() * self.getLenp2()) / (2* self.getLenp1() * self.getLenp3()))
return ret
def getAp3(self):
ret = math.acos(((self.getLenp1() * self.getLenp1() + self.getLenp2() * self.getLenp2()) - self.getLenp3() * self.getLenp3()) / (2* self.getLenp1() * self.getLenp2()))
return ret
def drawMe(self, g):
self.g = g
r = 5
        # Draw the three vertices
self.p1.drawMe(self.g,r)
self.p2.drawMe(self.g,r)
self.p3.drawMe(self.g,r)
line1 = Line(self.p1,self.p2)
line2 = Line(self.p1,self.p3)
line3 = Line(self.p2,self.p3)
        # Draw the three edges
line1.drawMe(self.g)
line2.drawMe(self.g)
line3.drawMe(self.g)
# ends Triangle def
    # Define the triangle by its three side lengths
def setSSS(self, lenp3, lenp1, lenp2):
self.lenp3 = lenp3
self.lenp1 = lenp1
self.lenp2 = lenp2
self.ap1 = math.acos(((self.lenp2 * self.lenp2 + self.lenp3 * self.lenp3) - self.lenp1 * self.lenp1) / (2* self.lenp2 * self.lenp3))
self.ap2 = math.acos(((self.lenp1 * self.lenp1 + self.lenp3 * self.lenp3) - self.lenp2 * self.lenp2) / (2* self.lenp1 * self.lenp3))
self.ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp2 * self.lenp2) - self.lenp3 * self.lenp3) / (2* self.lenp1 * self.lenp2))
    # Define the triangle by two sides and the included angle
def setSAS(self, lenp3, ap2, lenp1):
self.lenp3 = lenp3
self.ap2 = ap2
self.lenp1 = lenp1
self.lenp2 = math.sqrt((self.lenp3 * self.lenp3 + self.lenp1 * self.lenp1) - 2* self.lenp3 * self.lenp1 * math.cos(self.ap2))
    # Equivalent to SSS(AB, BC, CA)
def setSaSS(self, lenp2, lenp3, lenp1):
self.lenp2 = lenp2
self.lenp3 = lenp3
self.lenp1 = lenp1
if(self.lenp1 > (self.lenp2 + self.lenp3)):
            # Angle CAB is 180 degrees: the three points are collinear and A lies between B and C
ret = math.pi
else :
            # Angle CAB is 0: the points are collinear and A is not between B and C
if((self.lenp1 < (self.lenp2 - self.lenp3)) or (self.lenp1 < (self.lenp3 - self.lenp2))):
ret = 0.0
else :
                # Use the law of cosines to find angle CAB
ret = math.acos(((self.lenp2 * self.lenp2 + self.lenp3 * self.lenp3) - self.lenp1 * self.lenp1) / (2 * self.lenp2 * self.lenp3))
return ret
    # Get the triangle's three side lengths
def getSSS(self):
temp = []
temp.append( self.getLenp1() )
temp.append( self.getLenp2() )
temp.append( self.getLenp3() )
return temp
    # Get the triangle's three angles
def getAAA(self):
temp = []
temp.append( self.getAp1() )
temp.append( self.getAp2() )
temp.append( self.getAp3() )
return temp
    # Get the triangle's three angles and three side lengths
def getASASAS(self):
temp = []
temp.append(self.getAp1())
temp.append(self.getLenp1())
temp.append(self.getAp2())
temp.append(self.getLenp2())
temp.append(self.getAp3())
temp.append(self.getLenp3())
return temp
    # Given two points and two side lengths, return the middle point p2
def setPPSS(self, p1, p3, lenp1, lenp3):
temp = []
self.p1 = p1
self.p3 = p3
self.lenp1 = lenp1
self.lenp3 = lenp3
        # bp3 is the direction angle of line31 at p3, cp3 is the direction toward p2; p2 is the output
line31 = Line(p3, p1)
self.lenp2 = line31.getR()
#self.lenp2 = self.p3.distance(self.p1)
        # Compute angle 3 here
ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp2 * self.lenp2) - self.lenp3 * self.lenp3) / (2 * self.lenp1 * self.lenp2))
#ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp3 * self.lenp3) - self.lenp2 * self.lenp2) / (2 * self.lenp1 * self.lenp3))
bp3 = line31.getT()
cp3 = bp3 - ap3
temp.append(p3.x + self.lenp1*math.cos(cp3))#p2.x
temp.append(p3.y + self.lenp1*math.sin(cp3))#p2.y
return temp
def tag(g, p):
    pass
# Run the drawing routine; note that x and y are global variables
def draw():
global theta
context.clearRect(0, 0, canvas.width, canvas.height)
line1.drawMe(context)
line2.drawMe(context)
line3.drawMe(context)
#triangle1.drawMe(context)
#triangle2.drawMe(context)
theta += dx
p2.x = p1.x + line1.length*math.cos(theta*degree)
p2.y = p1.y - line1.length*math.sin(theta*degree)
p3.x, p3.y = triangle2.setPPSS(p2,p4,link2_len,link3_len)
p1.tag(context)
# The definitions of the related classes and functions end here
# Global variables
# Geometry input variables
x=10
y=10
r=10
# Canvas and drawing content
# Other input variables
theta = 0
degree = math.pi/180.0
dx = 2
dy = 4
# Set the positions of p1, p2, p3, p4
p1 = Point(150,100)
p2 = Point(150,200)
p3 = Point(300,300)
p4 = Point(350,100)
# Create links from the point positions
line1 = Link(p1,p2)
line2 = Link(p2,p3)
line3 = Link(p3,p4)
line4 = Link(p1,p4)
line5 = Link(p2,p4)
link2_len = p2.distance(p3)
link3_len = p3.distance(p4)
#link2_len = line1.getR()
#link3_len = line3.getR()
#alert(str(link2_len)+','+str(link3_len))
triangle1 = Triangle(p1,p2,p4)
triangle2 = Triangle(p2,p3,p4)
# Code executed when the page loads
# Canvas setup
canvas = doc["plotarea"]
context = canvas.getContext("2d")
# Coordinate transform: translate by canvas.height and flip the sign of y, i.e. move the origin to the bottom-left corner
context.translate(0,canvas.height)
context.scale(1,-1)
# Call draw() repeatedly every 10 milliseconds
#time.set_interval(draw,20)
browser.timer.set_interval(draw,10)
|
2014c2g5/2014cadp
|
wsgi/local_data/brython_programs/brython_fourbar1.py
|
Python
|
gpl-3.0
| 11,960
|
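The angle computations in Triangle (getAp1() and setSaSS()) are the law of cosines plus special-casing of collinear points. A plain-Python check of the same arithmetic outside the Brython/canvas context; the helper name and test values are made up:
import math
def angle_from_sides(a, b, c):
    # Angle opposite side a in a triangle with sides a, b, c (law of cosines),
    # with the collinear cases handled the way setSaSS() does above.
    if a >= b + c:
        return math.pi      # collinear, the angle opens to 180 degrees
    if a <= abs(b - c):
        return 0.0          # collinear, the angle closes to 0
    return math.acos((b * b + c * c - a * a) / (2.0 * b * c))
# 3-4-5 right triangle: the angle opposite the hypotenuse is 90 degrees.
print(math.degrees(angle_from_sides(5, 3, 4)))  # 90.0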
#!/usr/bin/env python2
"""
Convert genbank to multifasta of proteins.
USAGE:
cat file.gb | gb2protein.py > file.faa
NOTE:
It's designed to work with gb files coming from GenBank. gene is used as gene_id and transcript_id (locus_tag if gene not present).
Only entries having types in allowedTypes = ['gene','CDS','tRNA','tmRNA','rRNA','ncRNA'] are stored in GTF. Need to include exon processing.
No frame info is processed. Need to be included in order to process genes having introns!
AUTHOR:
Leszek Pryszcz
lpryszcz@crg.eu
Version 0.1
"""
import os, sys
from datetime import datetime
from Bio import SeqIO
def gb2protein( source='gb2gtf',allowedTypes=set(['gene','CDS','tRNA','tmRNA','rRNA','ncRNA']) ):
"""
"""
handle = sys.stdin
for gb in SeqIO.parse( handle,'gb' ):
acc = gb.id #gb.name #gb.description # #
skipped = 0
skippedTypes = set()
for f in gb.features:
#process only gene and CDS entries
if f.type != 'CDS': #not in allowedTypes:
#skipped += 1
#skippedTypes.add( f.type )
continue
#generate comments field
if 'locus_tag' in f.qualifiers:
                # use locus tag as gene_id/transcript_id
gene_id = transcript_id = f.qualifiers['locus_tag'][0]
else:
sys.stderr.write( "Error: Neither `gene` nor `locus_tag` found for entry: %s\n" % '; '.join( str(f).split('\n') ) )
continue
comments = 'gene_id "%s"; transcript_id "%s"' % ( gene_id,transcript_id )
if 'gene' in f.qualifiers:
comments += '; gene_id "%s"' % f.qualifiers['gene'][0]
if 'protein_id' in f.qualifiers:
comments += '; protein_id "%s"' % f.qualifiers['protein_id'][0]
#add external IDs
if 'db_xref' in f.qualifiers:
for extData in f.qualifiers['db_xref']:
comments += '; db_xref "%s"' % extData
#code strand as +/- (in genbank 1 or -1)
if int(f.strand)>0: strand = '+'
else: strand = '-'
fasta = ">%s\n%s\n" % ( gene_id,f.extract(gb.seq).translate() )
sys.stdout.write( fasta )
#sys.stderr.write( "%s\tSkipped %s entries having types: %s.\n" % ( gb.id,skipped,', '.join(skippedTypes) ) )
if __name__=='__main__':
t0=datetime.now()
gb2protein()
dt=datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
|
lpryszcz/bin
|
gb2protein.py
|
Python
|
gpl-3.0
| 2,381
|
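The core of the script above is feature.extract(record.seq).translate(). A minimal, self-contained illustration of that call with a hand-built CDS feature (the sequence and coordinates are toy values; exact Biopython behaviour may vary slightly by version):
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation
seq = Seq("ATGGCCTAA")                             # Met-Ala-Stop
cds = SeqFeature(FeatureLocation(0, 9, strand=1), type="CDS")
protein = cds.extract(seq).translate()             # same call gb2protein() makes
print(">toy_gene\n%s" % protein)                   # >toy_gene / MA*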
from gym import core
class ArgumentEnv(core.Env):
calls = 0
def __init__(self, arg):
self.calls += 1
self.arg = arg
def test_env_instantiation():
    # This looks pretty trivial, but given our usage of
# __new__, it's worth having.
env = ArgumentEnv('arg')
assert env.arg == 'arg'
assert env.calls == 1
|
xpharry/Udacity-DLFoudation
|
tutorials/reinforcement/gym/gym/tests/test_core.py
|
Python
|
mit
| 353
|
#!/usr/bin/env python
import os
def get_project_root_path():
try:
project_path = os.environ['ICE_HOME']
except:
project_path = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
return project_path
def get_area_filepath():
project_path = get_project_root_path()
area_filepath = os.path.join(project_path, 'areas.cfg')
return area_filepath
|
mitkin/avhrr-sic-analysis
|
satistjenesten/utils.py
|
Python
|
mit
| 392
|
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Ciphertext Block Chaining (CBC) mode.
"""
__all__ = ['CbcMode']
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
create_string_buffer, get_raw_buffer,
SmartPointer, c_size_t, expect_byte_string)
from Cryptodome.Random import get_random_bytes
raw_cbc_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._raw_cbc", """
int CBC_start_operation(void *cipher,
const uint8_t iv[],
size_t iv_len,
void **pResult);
int CBC_encrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_decrypt(void *cbcState,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CBC_stop_operation(void *state);
"""
)
class CbcMode(object):
"""*Cipher-Block Chaining (CBC)*.
Each of the ciphertext blocks depends on the current
and all previous plaintext blocks.
An Initialization Vector (*IV*) is required.
See `NIST SP800-38A`_ , Section 6.2 .
.. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
:undocumented: __init__
"""
def __init__(self, block_cipher, iv):
"""Create a new block cipher, configured in CBC mode.
:Parameters:
block_cipher : C pointer
A smart pointer to the low-level block cipher instance.
iv : byte string
The initialization vector to use for encryption or decryption.
It is as long as the cipher block.
**The IV must be unpredictable**. Ideally it is picked randomly.
Reusing the *IV* for encryptions performed with the same key
compromises confidentiality.
"""
expect_byte_string(iv)
self._state = VoidPointer()
result = raw_cbc_lib.CBC_start_operation(block_cipher.get(),
iv,
c_size_t(len(iv)),
self._state.address_of())
if result:
raise ValueError("Error %d while instatiating the CBC mode"
% result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
raw_cbc_lib.CBC_stop_operation)
        # Memory allocated for the underlying block cipher is now owned
# by the cipher mode
block_cipher.release()
self.block_size = len(iv)
"""The block size of the underlying cipher, in bytes."""
self.iv = iv
"""The Initialization Vector originally used to create the object.
The value does not change."""
self.IV = iv
"""Alias for `iv`"""
self._next = [ self.encrypt, self.decrypt ]
def encrypt(self, plaintext):
"""Encrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have encrypted a message
you cannot encrypt (or decrypt) another message using the same
object.
The data to encrypt can be broken up in two or
more pieces and `encrypt` can be called multiple times.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is equivalent to:
>>> c.encrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not add any padding to the plaintext.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
            Its length must be a multiple of the cipher block size.
:Return:
the encrypted data, as a byte string.
It is as long as *plaintext*.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() cannot be called after decrypt()")
self._next = [ self.encrypt ]
expect_byte_string(plaintext)
ciphertext = create_string_buffer(len(plaintext))
result = raw_cbc_lib.CBC_encrypt(self._state.get(),
plaintext,
ciphertext,
c_size_t(len(plaintext)))
if result:
raise ValueError("Error %d while encrypting in CBC mode" % result)
return get_raw_buffer(ciphertext)
def decrypt(self, ciphertext):
"""Decrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have decrypted a message
you cannot decrypt (or encrypt) another message with the same
object.
The data to decrypt can be broken up in two or
more pieces and `decrypt` can be called multiple times.
That is, the statement:
>>> c.decrypt(a) + c.decrypt(b)
is equivalent to:
>>> c.decrypt(a+b)
This function does not remove any padding from the plaintext.
:Parameters:
ciphertext : byte string
The piece of data to decrypt.
            Its length must be a multiple of the cipher block size.
:Return: the decrypted data (byte string).
"""
if self.decrypt not in self._next:
raise TypeError("decrypt() cannot be called after encrypt()")
self._next = [ self.decrypt ]
expect_byte_string(ciphertext)
plaintext = create_string_buffer(len(ciphertext))
result = raw_cbc_lib.CBC_decrypt(self._state.get(),
ciphertext,
plaintext,
c_size_t(len(ciphertext)))
if result:
raise ValueError("Error %d while decrypting in CBC mode" % result)
return get_raw_buffer(plaintext)
def _create_cbc_cipher(factory, **kwargs):
"""Instantiate a cipher object that performs CBC encryption/decryption.
:Parameters:
factory : module
The underlying block cipher, a module from ``Cryptodome.Cipher``.
:Keywords:
iv : byte string
The IV to use for CBC.
IV : byte string
Alias for ``iv``.
Any other keyword will be passed to the underlying block cipher.
See the relevant documentation for details (at least ``key`` will need
to be present).
"""
cipher_state = factory._create_base_cipher(kwargs)
iv = kwargs.pop("IV", None)
IV = kwargs.pop("iv", None)
if (None, None) == (iv, IV):
iv = get_random_bytes(factory.block_size)
if iv is not None:
if IV is not None:
raise TypeError("You must either use 'iv' or 'IV', not both")
else:
iv = IV
if kwargs:
raise TypeError("Unknown parameters for CBC: %s" % str(kwargs))
return CbcMode(cipher_state, iv)
|
Haynie-Research-and-Development/jarvis
|
deps/lib/python3.4/site-packages/Cryptodome/Cipher/_mode_cbc.py
|
Python
|
gpl-2.0
| 8,915
|
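For context, a typical round trip through this mode via the public pycryptodome API looks roughly as follows; the key and message are placeholders, and padding is handled explicitly because CbcMode itself adds none:
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad, unpad
key = get_random_bytes(16)
cipher = AES.new(key, AES.MODE_CBC)               # a random IV is generated for us
ct = cipher.encrypt(pad(b"attack at dawn", AES.block_size))
iv = cipher.iv                                    # must travel with the ciphertext
decrypter = AES.new(key, AES.MODE_CBC, iv=iv)
assert unpad(decrypter.decrypt(ct), AES.block_size) == b"attack at dawn"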
"""
05-complex-sequences.py - Exploring generators that sequence values.
This example explores two more generators that sequence values.
**EventSlide** ::
EventSlide(values, segment, step, startpos=0, wraparound=True,
occurrences=inf, stopEventsWhenDone=True)
EventSlide plays sub-melodies of length 'segment' and then moves by 'step'
positions from the beginning of the last segment to start another one, and
so on. The argument 'step' can be negative to move backward and the 'startpos'
can also be negative to start from the end of the list. If 'wraparound' is True,
indexing wraps around if goes past beginning or end. If False, the playback
stops if it goes outside the list bounds.
**EventIndex** ::
EventIndex(values, index, occurrences=inf, stopEventsWhenDone=True)
EventIndex uses an 'index' parameter to read specific positions into the
list 'values'. This is useful, for instance, to read a scale using degree
values.
"""
from pyo import *
s = Server().boot()
scl = [5.00, 5.02, 5.03, 5.05, 5.07, 5.08, 5.10, 6.00, 6.02, 6.03, 6.05, 6.07]
# 3 notes segments, moving forward 1 index each iteration.
e = Events(
degree=EventSlide(scl, segment=3, step=1, startpos=0),
beat=1 / 2.0,
db=-6,
attack=0.001,
decay=0.05,
sustain=0.5,
release=0.005,
).play()
# 4 notes segments, moving backward (from the end) 2 indexes each iteration.
e2 = Events(
degree=EventSlide(scl, segment=4, step=-2, startpos=-1),
beat=1,
db=-12,
transpo=12,
attack=0.001,
decay=0.05,
sustain=0.5,
release=0.005,
).play()
# Arpeggio on the root chord.
e3 = Events(
degree=EventIndex(scl, EventSeq([0, 4, 2, 0, 4, 2, 0, 2, 4, 7, 4, 2])),
db=EventSeq([-6, -12, -12]),
beat=1 / 4.0,
transpo=-12,
attack=0.001,
decay=0.05,
sustain=0.5,
release=0.005,
).play()
s.gui(locals())
|
belangeo/pyo
|
pyo/examples/22-events/05-complex-sequences.py
|
Python
|
lgpl-3.0
| 1,878
|
from Bio import SeqIO
import re
def extract(fasta, chrom, start = None, end = None):
''' Function to extract sequence from a FASTA file which is then
returned as a string. Function takes four arguments:
1) fasta - Input fasta file.
2) chrom - Chromosome name.
3) start - First base of sequence to extract.
4) end - Last base of sequence to extract.
'''
# Check arguments
if not isinstance(chrom, str):
raise TypeError('chrom must be a string')
if start and not isinstance(start, int):
raise TypeError('start must be an integer')
if end and not isinstance(end, int):
raise TypeError('end must be an integer')
# Create iterator and loop through sequences
for sequence in SeqIO.parse(open(fasta), 'fasta'):
# Find chromosome within fasta file
if sequence.id == chrom:
# Create start and end values if not supplied
if start is None:
start = 1
if end is None:
end = len(sequence)
# Check end is not greater than sequence length
if end > len(sequence):
raise ValueError('Interval extends beyond chromosome')
# Check end is greater than or equal to start
if end < start:
                raise ValueError('End precedes start')
# Extract and return sequence
return(str(sequence.seq[start-1:end]))
# Raise error if chromosome not found
raise ValueError('Chromosome not found in FASTA file')
def replace(insert, target, start, end):
''' Function replaces a portion of a sequence with an alternative
sequence. Function takes four
arguments:
1) insert - Sequence to insert into target sequence.
2) target - Target sequence.
3) start - Start of target sequence to be replaced.
    4) end - End of target sequence to be replaced.
    Function returns the novel chimeric sequence as well as the excised
portion of the target sequence
'''
# Adjust start to account for python zero index
start -= 1
# Split target
left = target[ : start]
replace = target[start : end]
right = target[end : ]
# Insert reference
chimera = left + insert + right
return(chimera, replace)
def wrap(sequence, width = 60):
''' Function wraps string into lines of a fixed width. Function
takes two arguments:
1) sequence
2) width - Maximum line width excluding newline character
'''
# Remove whitespace from sequence
sequence = re.sub('\s+','',sequence)
# Add line breaks and return
regx = '([^\s]{%s})' %(width)
sequence = re.sub(regx, '\\1\n', sequence)
return(sequence.strip())
|
adam-rabinowitz/ngs_analysis
|
fasta/faProcess.py
|
Python
|
gpl-2.0
| 2,749
|
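A small, self-contained use of replace() and wrap() from the module above (toy strings, no FASTA file involved; the import path is hypothetical and depends on where faProcess.py lives locally):
from faProcess import replace, wrap  # hypothetical import path
# Swap bases 3-5 of a toy target for 'NNN', then wrap to 10-character lines.
chimera, excised = replace(insert="NNN", target="ACGTACGTACGT", start=3, end=5)
print(excised)                  # GTA
print(wrap(chimera, width=10))  # ACNNNCGTAC
                                # GT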
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
from load import mnist
srng = RandomStreams()
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def rectify(X):
return T.maximum(X, 0.)
def softmax(X):
e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))
return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
def dropout(X, p=0.):
if p > 0:
retain_prob = 1 - p
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
def model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden):
X = dropout(X, p_drop_input)
h = rectify(T.dot(X, w_h))
h = dropout(h, p_drop_hidden)
h2 = rectify(T.dot(h, w_h2))
h2 = dropout(h2, p_drop_hidden)
py_x = softmax(T.dot(h2, w_o))
return h, h2, py_x
trX, teX, trY, teY = mnist(onehot=True)
X = T.fmatrix()
Y = T.fmatrix()
w_h = init_weights((784, 625))
w_h2 = init_weights((625, 625))
w_o = init_weights((625, 10))
noise_h, noise_h2, noise_py_x = model(X, w_h, w_h2, w_o, 0.2, 0.5)
h, h2, py_x = model(X, w_h, w_h2, w_o, 0., 0.)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
params = [w_h, w_h2, w_o]
updates = RMSprop(cost, params, lr=0.001)
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
cost = train(trX[start:end], trY[start:end])
print np.mean(np.argmax(teY, axis=1) == predict(teX))
|
escherba/Theano-Tutorials
|
4_modern_net.py
|
Python
|
mit
| 2,247
|
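The RMSprop() function above keeps a leaky average of squared gradients and rescales each gradient by its root before the usual learning-rate step. The same arithmetic for a single parameter in plain NumPy, with arbitrary numbers, as a sanity check:
import numpy as np
def rmsprop_step(p, g, acc, lr=0.001, rho=0.9, epsilon=1e-6):
    # acc is the decaying mean of squared gradients; g is scaled by its root.
    acc_new = rho * acc + (1 - rho) * g ** 2
    p_new = p - lr * g / np.sqrt(acc_new + epsilon)
    return p_new, acc_new
p, acc = np.array([0.5]), np.zeros(1)
p, acc = rmsprop_step(p, np.array([0.2]), acc)
print(p, acc)  # parameter nudged down slightly, accumulator now positive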
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_account', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('msgid', models.CharField(max_length=50, verbose_name='MsgID\u6216FromUserName+CreateTime')),
('target', models.CharField(max_length=50, verbose_name='\u76ee\u6807\u7528\u6237OpenID')),
('source', models.CharField(max_length=50, verbose_name='\u6765\u6e90\u7528\u6237OpenID')),
('time', models.IntegerField(verbose_name='\u4fe1\u606f\u53d1\u9001\u65f6\u95f4')),
('type', models.CharField(max_length=15, verbose_name='\u4fe1\u606f\u7c7b\u578b', choices=[(b'text', '\u6587\u672c\u6d88\u606f'), (b'image', '\u56fe\u7247\u6d88\u606f'), (b'video', '\u89c6\u9891\u6d88\u606f'), (b'voice', '\u8bed\u97f3\u6d88\u606f'), (b'news', '\u56fe\u6587\u6d88\u606f'), (b'music', '\u97f3\u4e50\u6d88\u606f'), (b'waiting', '\u6267\u884c\u4e2d\u6d88\u606f')])),
('pattern', models.IntegerField(verbose_name='\u54cd\u5e94\u65b9\u5f0f', choices=[(0, '\u6b63\u5e38XML\u8fd4\u56de\u6a21\u5f0f'), (1, '\u591a\u5ba2\u670d\u8fd4\u56de\u6a21\u5f0f'), (2, '\u6a21\u62df\u767b\u9646\u8fd4\u56de\u6a21\u5f0f'), (3, '\u6267\u884c\u4e2d\u6d88\u606f')])),
('raw', models.TextField(verbose_name='\u54cd\u5e94\u4fe1\u606f\u539f\u59cb\u5185\u5bb9')),
('plugin_iden', models.CharField(max_length=50, null=True, verbose_name='\u63d2\u4ef6\u6807\u8bc6\u7b26', blank=True)),
('reply_id', models.IntegerField(null=True, verbose_name='\u63d2\u4ef6\u56de\u590dID', blank=True)),
('official_account', models.ForeignKey(verbose_name='\u6240\u5c5e\u516c\u4f17\u53f7', to='official_account.OfficialAccount')),
],
options={
'db_table': 'response',
'verbose_name': '\u5fae\u4fe1\u670d\u52a1\u5668\u54cd\u5e94\u4fe1\u606f',
'verbose_name_plural': '\u5fae\u4fe1\u670d\u52a1\u5668\u54cd\u5e94\u4fe1\u606f',
},
bases=(models.Model,),
),
]
|
doraemonext/wechat-platform
|
wechat_platform/system/response/migrations/0001_initial.py
|
Python
|
bsd-2-clause
| 2,392
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2015 Clear ICT Solutions <info@clearict.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import fields, models
class FleetVehicle(models.Model):
_inherit = 'fleet.vehicle'
# Fields
#
engine_no = fields.Char('Engine Number')
|
Clear-ICT/odoo-addons
|
fleet_engine_number/models/fleet.py
|
Python
|
agpl-3.0
| 978
|
from django.db import models
class MonthlyWeatherByCity(models.Model):
month = models.IntegerField()
boston_temp = models.DecimalField(max_digits=5, decimal_places=1)
houston_temp = models.DecimalField(max_digits=5, decimal_places=1)
new_york_temp = models.DecimalField(max_digits=5, decimal_places=1)
san_francisco_temp = models.DecimalField(max_digits=5, decimal_places=1)
class MonthlyWeatherSeattle(models.Model):
month = models.IntegerField()
seattle_temp = models.DecimalField(max_digits=5, decimal_places=1)
class DailyWeather(models.Model):
month = models.IntegerField()
day = models.IntegerField()
temperature = models.DecimalField(max_digits=5, decimal_places=1)
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
class Author(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
def __unicode__(self):
return '%s %s' % (self.first_name, self.last_name)
class Publisher(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return '%s' % (self.name)
class Genre(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return '%s' % (self.name)
class Book(models.Model):
title = models.CharField(max_length=50)
rating = models.FloatField()
rating_count = models.IntegerField()
authors = models.ManyToManyField(Author)
publisher = models.ForeignKey(Publisher, null=True, blank=True,
on_delete=models.SET_NULL)
published_at = models.DateTimeField(null=True, blank=True)
related = models.ManyToManyField('self', blank=True)
genre = models.ForeignKey(Genre, null=True, blank=True,
on_delete=models.SET_NULL)
def __unicode__(self):
return '%s' % (self.title)
class City(models.Model):
city = models.CharField(max_length=50)
state = models.CharField(max_length=2)
def __unicode__(self):
return '%s, %s' % (self.city, self.state)
def region(self):
return 'USA:%s' % self.city
class BookStore(models.Model):
name = models.CharField(max_length=50)
city = models.ForeignKey('City')
def __unicode__(self):
return '%s' % (self.name)
class SalesHistory(models.Model):
bookstore = models.ForeignKey(BookStore)
book = models.ForeignKey(Book)
sale_date = models.DateField()
sale_qty = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
def __unicode__(self):
return '%s %s %s' % (self.bookstore, self.book, self.sale_date)
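# A minimal usage sketch of the models above (hypothetical data; assumes a
# configured Django project with these models migrated and populated):
#   from django.db.models import Avg, Sum
#   Book.objects.filter(genre__name='Fiction').aggregate(Avg('rating'))
#   SalesHistory.objects.filter(bookstore__city__state='CA').aggregate(Sum('sale_qty'))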
|
pgollakota/django-chartit
|
demoproject/demoproject/models.py
|
Python
|
bsd-2-clause
| 2,689
|
"""
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import re
import numpy as np
__all__ = ['np', 'fullfact', 'ff2n', 'fracfact']
def fullfact(levels):
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j]*level_repeat
rng = lvl*range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H
################################################################################
def ff2n(n):
"""
Create a 2-Level full-factorial design
Parameters
----------
n : int
The number of factors in the design.
Returns
-------
mat : 2d-array
The design matrix with coded levels -1 and 1
Example
-------
::
>>> ff2n(3)
array([[-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])
"""
return 2*fullfact([2]*n) - 1
################################################################################
def fracfact(gen):
"""
Create a 2-level fractional-factorial design with a generator string.
Parameters
----------
gen : str
        A string of lowercase or uppercase letters, optionally prefixed by the
        operators "-" and "+", indicating the factors of the experiment
Returns
-------
H : 2d-array
        An m-by-n matrix, the fractional factorial design. m is 2^k, where k
is the number of letters in ``gen``, and n is the total number of
entries in ``gen``.
Notes
-----
In ``gen`` we define the main factors of the experiment and the factors
whose levels are the products of the main factors. For example, if
gen = "a b ab"
then "a" and "b" are the main factors, while the 3rd factor is the product
of the first two. If we input uppercase letters in ``gen``, we get the same
result. We can also use the operators "+" and "-" in ``gen``.
For example, if
gen = "a b -ab"
then the 3rd factor is the opposite of the product of "a" and "b".
The output matrix includes the two level full factorial design, built by
the main factors of ``gen``, and the products of the main factors. The
columns of ``H`` follow the sequence of ``gen``.
For example, if
gen = "a b ab c"
then columns H[:, 0], H[:, 1], and H[:, 3] include the two level full
factorial design and H[:, 2] includes the products of the main factors.
Examples
--------
::
>>> fracfact("a b ab")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("A B AB")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("a b -ab c +abc")
array([[-1., -1., -1., -1., -1.],
[ 1., -1., 1., -1., 1.],
[-1., 1., 1., -1., 1.],
[ 1., 1., -1., -1., -1.],
[-1., -1., -1., 1., 1.],
[ 1., -1., 1., 1., -1.],
[-1., 1., 1., 1., -1.],
[ 1., 1., -1., 1., 1.]])
"""
# Recognize letters and combinations
#### fixed for python 3.7 by alfoa
A = [item for item in re.split(r'\-|\s|\+', gen) if item] # remove empty strings
C = [len(item) for item in A]
# Indices of single letters (main factors)
I = [i for i, item in enumerate(C) if item==1]
# Indices of letter combinations (we need them to fill out H2 properly).
J = [i for i, item in enumerate(C) if item!=1]
# Check if there are "-" or "+" operators in gen
U = [item for item in gen.split(' ') if item] # remove empty strings
    # R1 (the "+" columns) never changes the result, since multiplying a
    # column by +1 is a no-op; only R2 (the "-" columns) is applied below.
R1 = _grep(U, '+')
R2 = _grep(U, '-')
# Fill in design with two level factorial design
H1 = ff2n(len(I))
H = np.zeros((H1.shape[0], len(C)))
H[:, I] = H1
# Recognize combinations and fill in the rest of matrix H2 with the proper
# products
for k in J:
# For lowercase letters
xx = np.array([ord(c) for c in A[k]]) - 97
# For uppercase letters
if np.any(xx<0):
xx = np.array([ord(c) for c in A[k]]) - 65
H[:, k] = np.prod(H1[:, xx], axis=1)
# Update design if gen includes "-" operator
if R2:
H[:, R2] *= -1
# Return the fractional factorial design
return H
def _grep(haystack, needle):
try:
haystack[0]
except (TypeError, AttributeError):
return [0] if needle in haystack else []
else:
locs = []
for idx, item in enumerate(haystack):
if needle in item:
locs += [idx]
return locs
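# A minimal sketch of the sign handling above (only runs when the module is
# executed directly). _grep scans the space-separated tokens of ``gen``;
# fracfact then negates every column whose token carries a "-".
if __name__ == '__main__':
    tokens = [tok for tok in "a b -ab c +abc".split(' ') if tok]
    print(_grep(tokens, '-'))         # [2]  -> the "-ab" column gets negated
    print(_grep(tokens, '+'))         # [4]  -> the "+abc" column is left as-is
    print(fracfact("a b -ab")[:, 2])  # equals -(column a * column b)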
|
idaholab/raven
|
framework/contrib/pyDOE/doe_factorial.py
|
Python
|
apache-2.0
| 7,243
|
from .mpdserializer import CommandError
assert CommandError
from .mpdserializer import ConnectionError
assert ConnectionError
from .mpdserializer import MPDError
assert MPDError
from .mpdserializer import ProtocolError
assert ProtocolError
from .mpdserializer import deserialize_hello
assert deserialize_hello
from .mpdserializer import deserialize_nothing
assert deserialize_nothing
from .mpdserializer import deserialize_tuple
assert deserialize_tuple
from .mpdserializer import deserialize_dict
assert deserialize_dict
from .mpdserializer import deserialize_songs
assert deserialize_songs
from .mpdserializer import serialize_command
assert serialize_command
|
duganchen/qmpdsocket
|
qmpdsocket/mpdserializer/__init__.py
|
Python
|
mit
| 671
|
# encoding: utf-8
u'''MCL — Organ Folder'''
from ._base import IIngestableFolder, Ingestor, IngestableFolderView
from .interfaces import IOrgan
from five import grok
class IOrganFolder(IIngestableFolder):
u'''Folder containing body systems, also known as organs.'''
class OrganIngestor(Ingestor):
u'''RDF ingestor for organs.'''
grok.context(IOrganFolder)
def getContainedObjectInterface(self):
return IOrgan
class View(IngestableFolderView):
u'''View for an organ folder'''
grok.context(IOrganFolder)
|
MCLConsortium/mcl-site
|
src/jpl.mcl.site.knowledge/src/jpl/mcl/site/knowledge/organfolder.py
|
Python
|
apache-2.0
| 544
|
# modules can be found here: https://pypi.python.org/pypi/
import os, sys, json
import test_ext
def f():
return test_ext.fext()
#print (test_ext.x)
f()
|
a-langer/lo_report
|
rununo/test.py
|
Python
|
apache-2.0
| 171
|
def extractNewbietranslatorsWordpressCom(item):
'''
Parser for 'newbietranslators.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Monster Factory', 'Monster Factory', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractNewbietranslatorsWordpressCom.py
|
Python
|
bsd-3-clause
| 654
|
"""
tests for dnstest_checks.py check_renamed_name() and verify_renamed_name()
The latest version of this package is available at:
<https://github.com/jantman/pydnstest>
##################################################################################
Copyright 2013-2017 Jason Antman <jason@jasonantman.com>
This file is part of pydnstest.
pydnstest is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pydnstest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with pydnstest. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
##################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/pydnstest> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
##################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
"""
import pytest
import sys
import os
from pydnstest.checks import DNStestChecks
from pydnstest.config import DnstestConfig
"""
This dict stores the DNS results that our DNS-mocking functions will return.
Format of the 'known_dns' dict:
[chk|ver] - whether this is pre-change (_check methods) or post-change (_verify methods)
[prod|test] - whether this is for the prod or test DNS server
[fwd|rev] - whether this is forward or reverse DNS
[recordname] - the name of this record, as sent to the DNS query methods
= value - a string or list of the record value, see below
value can be:
for 'rev' dns:
- a string with the value of the PTR record
- the string "SERVFAIL", which returns a SERVFAIL result
for 'fwd' dns:
- a list whose first item is "STATUS", and whose second item is the 'status' attribute of the DNS result
- a list whose first item is the data/value of the record, and whose second item is the typename of the record (i.e. "A" or "CNAME")
"""
known_dns = {'chk': {'test': {'fwd': {}, 'rev': {}}, 'prod': {'fwd': {}, 'rev': {}}}, 'ver': {'test': {'fwd': {}, 'rev': {}}, 'prod': {'fwd': {}, 'rev': {}}}}
"""
This is a dict of dicts, each one corresponding to a single test case, and
having the following elements:
'oldname' - the old DNS record to be renamed
'newname' - what to rename that to
'value' - the value of the DNS record to rename
'result_chk' - the expected return dict for the check operation
'result_ver' - the expected return dict for the verify operation
"""
TESTS = {}
"""
Here we define all of the tests, along with their expected results for
check and verify, and the DNS entries that each test uses.
"""
# test 0 - valid rename, no reverse DNS
TESTS[0] = {'oldname': "renametest0", 'newname': "renametest0b", 'value': "1.2.3.20"}
known_dns['chk']['prod']['fwd']['renametest0.example.com'] = ['1.2.3.20', 'A']
known_dns['chk']['test']['fwd']['renametest0b.example.com'] = ['1.2.3.20', 'A']
known_dns['ver']['prod']['fwd']['renametest0b.example.com'] = ['1.2.3.20', 'A']
TESTS[0]['result_chk'] = {'message': 'rename renametest0 => renametest0b (TEST)', 'result': True, 'secondary': [], 'warnings': ['REVERSE NG: no reverse DNS appears to be set for 1.2.3.20 (TEST)']}
TESTS[0]['result_ver'] = {'message': 'rename renametest0 => renametest0b (PROD)', 'result': True, 'secondary': [], 'warnings': ['REVERSE NG: no reverse DNS appears to be set for 1.2.3.20 (PROD)']}
# test 1 - valid rename, but reverse DNS not updated (left at old value)
TESTS[1] = {'oldname': "renametest1.example.com", 'newname': "renametest1b.example.com", 'value': "1.2.3.21"}
known_dns['chk']['prod']['fwd']['renametest1.example.com'] = ['1.2.3.21', 'A']
known_dns['chk']['test']['fwd']['renametest1b.example.com'] = ['1.2.3.21', 'A']
known_dns['chk']['test']['rev']['1.2.3.21'] = 'renametest1.example.com'
known_dns['ver']['prod']['fwd']['renametest1b.example.com'] = ['1.2.3.21', 'A']
known_dns['ver']['prod']['rev']['1.2.3.21'] = 'renametest1.example.com'
TESTS[1]['result_chk'] = {'message': 'rename renametest1.example.com => renametest1b.example.com (TEST)', 'result': True, 'secondary': [], 'warnings': ['REVERSE NG: 1.2.3.21 appears to still have reverse DNS set to renametest1.example.com (TEST)']}
TESTS[1]['result_ver'] = {'message': 'rename renametest1.example.com => renametest1b.example.com (PROD)', 'result': True, 'secondary': [], 'warnings': ['REVERSE NG: 1.2.3.21 appears to still have reverse DNS set to renametest1.example.com (PROD)']}
# test 2 - everything right including reverse DNS
TESTS[2] = {'oldname': "renametest2", 'newname': "renametest2b", 'value': "1.2.3.22"}
known_dns['chk']['prod']['fwd']['renametest2.example.com'] = ['1.2.3.22', 'A']
known_dns['chk']['test']['fwd']['renametest2b.example.com'] = ['1.2.3.22', 'A']
known_dns['chk']['test']['rev']['1.2.3.22'] = 'renametest2b.example.com'
known_dns['ver']['prod']['fwd']['renametest2b.example.com'] = ['1.2.3.22', 'A']
known_dns['ver']['prod']['rev']['1.2.3.22'] = 'renametest2b.example.com'
TESTS[2]['result_chk'] = {'message': 'rename renametest2 => renametest2b (TEST)', 'result': True, 'secondary': ['REVERSE OK: reverse DNS is set correctly for 1.2.3.22 (TEST)'], 'warnings': []}
TESTS[2]['result_ver'] = {'message': 'rename renametest2 => renametest2b (PROD)', 'result': True, 'secondary': ['REVERSE OK: reverse DNS is set correctly for 1.2.3.22 (PROD)'], 'warnings': []}
# test 3 - this one should fail, it's actually an addition and a deletion, but values differ
TESTS[3] = {'oldname': "renametest3", 'newname': "renametest3b", 'value': "1.2.3.24"}
known_dns['chk']['prod']['fwd']['renametest3.example.com'] = ['1.2.3.23', 'A']
known_dns['chk']['test']['fwd']['renametest3b.example.com'] = ['1.2.3.23', 'A']
TESTS[3]['result_chk'] = {'message': 'renametest3 => renametest3b rename is bad, resolves to 1.2.3.23 in TEST (expected value was 1.2.3.24) (TEST)', 'result': False, 'secondary': [], 'warnings': []}
# test 4 - addition of new name, old name still active
TESTS[4] = {'oldname': "addedname2", 'newname': "renamedname", 'value': "1.2.3.12"}
known_dns['chk']['prod']['fwd']['renamedname.example.com'] = ['1.2.3.12', 'A']
known_dns['chk']['prod']['fwd']['addedname2.example.com'] = ['1.2.3.12', 'A']
known_dns['chk']['test']['fwd']['renamedname.example.com'] = ['1.2.3.12', 'A']
known_dns['chk']['test']['fwd']['addedname2.example.com'] = ['1.2.3.12', 'A']
known_dns['ver']['prod']['fwd']['renamedname.example.com'] = ['1.2.3.12', 'A']
known_dns['ver']['prod']['fwd']['addedname2.example.com'] = ['1.2.3.12', 'A']
TESTS[4]['result_chk'] = {'message': 'addedname2 got answer from TEST (1.2.3.12), old name is still active (TEST)', 'result': False, 'secondary': [], 'warnings': []}
TESTS[4]['result_ver'] = {'message': 'addedname2 got answer from PROD (1.2.3.12), old name is still active (PROD)', 'result': False, 'secondary': [], 'warnings': []}
# test 5 - SERVFAIL in prod
TESTS[5] = {'oldname': "renametest5", 'newname': "renametest5b", 'value': "renametest5cname"}
known_dns['chk']['prod']['fwd']['renametest5.example.com'] = ['STATUS', 'SERVFAIL']
known_dns['chk']['test']['fwd']['renametest5b.example.com'] = ['renametest5cname', 'CNAME']
known_dns['ver']['prod']['fwd']['renametest5b.example.com'] = ['STATUS', 'SERVFAIL']
TESTS[5]['result_chk'] = {'message': "renametest5 got status SERVFAIL from PROD - cannot change a name that doesn't exist (PROD)", 'result': False, 'secondary': [], 'warnings': []}
TESTS[5]['result_ver'] = {'message': "renametest5b got status SERVFAIL (PROD)", 'result': False, 'secondary': [], 'warnings': []}
# test 6 - SERVFAIL in test
TESTS[6] = {'oldname': "renametest6", 'newname': "renametest6b", 'value': "1.2.5.6"}
known_dns['chk']['prod']['fwd']['renametest6.example.com'] = ['1.2.5.6', 'A']
known_dns['chk']['test']['fwd']['renametest6b.example.com'] = ['STATUS', 'SERVFAIL']
known_dns['chk']['test']['rev']['1.2.5.6'] = 'renametest6b.example.com'
TESTS[6]['result_chk'] = {'message': "renametest6b got status SERVFAIL (TEST)", 'result': False, 'secondary': [], 'warnings': []}
# test 7 - valid but different answers in test and prod
TESTS[7] = {'oldname': "renametest7", 'newname': "renametest7b", 'value': '1.2.5.7'}
known_dns['chk']['test']['fwd']['renametest7b.example.com'] = ['1.2.5.7', 'A']
known_dns['chk']['prod']['fwd']['renametest7.example.com'] = ['1.2.4.7', 'A']
known_dns['ver']['test']['fwd']['renametest7b.example.com'] = ['1.2.5.7', 'A']
known_dns['ver']['prod']['fwd']['renametest7b.example.com'] = ['1.2.4.7', 'A']
TESTS[7]['result_chk'] = {'message': 'renametest7 => renametest7b rename is bad, resolves to 1.2.5.7 in TEST and 1.2.4.7 in PROD', 'result': False, 'secondary': [], 'warnings': []}
TESTS[7]['result_ver'] = {'message': 'renametest7 => renametest7b rename is bad, resolves to 1.2.5.7 in PROD (expected value was 1.2.5.7) (PROD)', 'result': False, 'secondary': [], 'warnings': []}
# test 8 - valid rename, is a CNAME
TESTS[8] = {'oldname': "renametest8", 'newname': "renametest8b", 'value': "renametest8cname"}
known_dns['chk']['prod']['fwd']['renametest8.example.com'] = ['renametest8cname', 'CNAME']
known_dns['chk']['test']['fwd']['renametest8b.example.com'] = ['renametest8cname', 'CNAME']
known_dns['ver']['prod']['fwd']['renametest8b.example.com'] = ['renametest8cname', 'CNAME']
TESTS[8]['result_chk'] = {'message': 'rename renametest8 => renametest8b (TEST)', 'result': True, 'secondary': [], 'warnings': []}
TESTS[8]['result_ver'] = {'message': 'rename renametest8 => renametest8b (PROD)', 'result': True, 'secondary': [], 'warnings': []}
class TestDNSCheckRename:
"""
Test DNS checks, using stubbed name resolution methods that return static values.
The code in this class checks the logic of dnstest.py's test_*_name methods, which take
input describing the change, and query nameservers to check current prod and staging status.
"""
@pytest.fixture(scope="module")
def setup_checks(self):
"""
Sets up test environment for tests of check methods,
including redefining resolve_name and lookup_reverse
to the appropriate methods in this class
"""
config = DnstestConfig()
config.server_test = "test"
config.server_prod = "prod"
config.default_domain = ".example.com"
config.have_reverse_dns = True
chk = DNStestChecks(config)
# stub
chk.DNS.resolve_name = self.stub_resolve_name
# stub
chk.DNS.lookup_reverse = self.stub_lookup_reverse
return chk
@pytest.fixture(scope="module")
def setup_verifies(self):
"""
Sets up test environment for tests of verify methods,
including redefining resolve_name and lookup_reverse
to the appropriate methods in this class
"""
config = DnstestConfig()
config.server_test = "test"
config.server_prod = "prod"
config.default_domain = ".example.com"
config.have_reverse_dns = True
chk = DNStestChecks(config)
# stub
chk.DNS.resolve_name = self.stub_resolve_name_verify
# stub
chk.DNS.lookup_reverse = self.stub_lookup_reverse_verify
return chk
def stub_resolve_name(self, query, to_server, to_port=53):
"""
DNS stub method
return a dict that looks like the return value from pydnstest.resolve_name
but either returns one of a hard-coded group of dicts, or an error.
"""
if query in known_dns['chk'][to_server]['fwd'] and known_dns['chk'][to_server]['fwd'][query][0] == "STATUS":
return {'status': known_dns['chk'][to_server]['fwd'][query][1]}
elif query in known_dns['chk'][to_server]['fwd']:
return {'answer': {'name': query, 'data': known_dns['chk'][to_server]['fwd'][query][0], 'typename': known_dns['chk'][to_server]['fwd'][query][1], 'classstr': 'IN', 'ttl': 360, 'type': 5, 'class': 1, 'rdlength': 14}}
else:
return {'status': 'NXDOMAIN'}
def stub_lookup_reverse(self, name, to_server, to_port=53):
"""
DNS stub method
return a dict that looks like the return value from pydnstest.lookup_reverse
but either returns one of a hard-coded group of dicts, or an error.
"""
if name in known_dns['chk'][to_server]['rev'] and known_dns['chk'][to_server]['rev'][name] == "SERVFAIL":
return {'status': 'SERVFAIL'}
elif name in known_dns['chk'][to_server]['rev']:
return {'answer': {'name': name, 'data': known_dns['chk'][to_server]['rev'][name], 'typename': 'PTR', 'classstr': 'IN', 'ttl': 360, 'type': 12, 'class': 1, 'rdlength': 33}}
else:
return {'status': 'NXDOMAIN'}
def stub_resolve_name_verify(self, query, to_server, to_port=53):
"""
DNS stub method
return a dict that looks like the return value from pydnstest.resolve_name
but either returns one of a hard-coded group of dicts, or an error.
"""
if query in known_dns['ver'][to_server]['fwd'] and known_dns['ver'][to_server]['fwd'][query][0] == "STATUS":
return {'status': known_dns['ver'][to_server]['fwd'][query][1]}
elif query in known_dns['ver'][to_server]['fwd']:
return {'answer': {'name': query, 'data': known_dns['ver'][to_server]['fwd'][query][0], 'typename': known_dns['ver'][to_server]['fwd'][query][1], 'classstr': 'IN', 'ttl': 360, 'type': 5, 'class': 1, 'rdlength': 14}}
else:
return {'status': 'NXDOMAIN'}
def stub_lookup_reverse_verify(self, name, to_server, to_port=53):
"""
DNS stub method
return a dict that looks like the return value from pydnstest.lookup_reverse
but either returns one of a hard-coded group of dicts, or an error.
"""
if name in known_dns['ver'][to_server]['rev'] and known_dns['ver'][to_server]['rev'][name] == "SERVFAIL":
return {'status': 'SERVFAIL'}
elif name in known_dns['ver'][to_server]['rev']:
return {'answer': {'name': name, 'data': known_dns['ver'][to_server]['rev'][name], 'typename': 'PTR', 'classstr': 'IN', 'ttl': 360, 'type': 12, 'class': 1, 'rdlength': 33}}
else:
return {'status': 'NXDOMAIN'}
###########################################
# Done with setup, start the actual tests #
###########################################
def test_rename(self):
"""
Run all of the tests from the TESTS dict, via yield
"""
sc = self.setup_checks()
sv = self.setup_verifies()
for t in TESTS:
tst = TESTS[t]
if 'result_chk' in tst:
yield "test_rename chk TESTS[%d]" % t, self.dns_rename, sc, tst['oldname'], tst['newname'], tst['value'], tst['result_chk']
if 'result_ver' in tst:
yield "test_rename ver TESTS[%d]" % t, self.dns_verify_rename, sv, tst['oldname'], tst['newname'], tst['value'], tst['result_ver']
def dns_rename(self, setup_checks, oldname, newname, value, result):
"""
Test checks for renaming a record in DNS (new name, same value)
"""
foo = setup_checks.check_renamed_name(oldname, newname, value)
assert foo == result
def dns_verify_rename(self, setup_verifies, oldname, newname, value, result):
"""
Test checks for verifying a renamed record in DNS (new name, same value)
"""
foo = setup_verifies.verify_renamed_name(oldname, newname, value)
assert foo == result
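# A minimal sketch of exercising a single case directly (assumption: the
# fixture methods above can be called as plain methods, as test_rename() does):
#   t = TestDNSCheckRename()
#   chk = t.setup_checks()
#   case = TESTS[2]
#   chk.check_renamed_name(case['oldname'], case['newname'], case['value'])
#   # -> should equal case['result_chk']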
|
jantman/pydnstest
|
pydnstest/tests/dnstest_check_rename_test.py
|
Python
|
agpl-3.0
| 16,335
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_MonthOfYear'] , ['SVR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_MonthOfYear_SVR.py
|
Python
|
bsd-3-clause
| 168
|
import sys
from SpaceDock.config import _cfg, _cfgi
from SpaceDock.database import db, init_db
from SpaceDock.objects import User
from SpaceDock.email import send_confirmation
init_db()
if sys.argv[1] == 'delete_user':
user = User.query.filter(User.username == sys.argv[2]).first()
if not user:
sys.exit("User not found.")
else:
db.delete(user)
db.commit()
print("Success.")
sys.exit()
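# Usage sketch (assumption: run from the project root with the SpaceDock
# database configured):
#   python admin.py delete_user some_username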
|
EIREXE/SpaceDock
|
admin.py
|
Python
|
mit
| 459
|
#!/usr/bin/python3
import argparse
import code
import readline
import signal
import sys
from parse import Argparser, premain, SigHandler_SIGINT, PythonInterpreter
from utils import ParseFlags
def getWASMModule():
module_path = sys.argv[1]
interpreter = PythonInterpreter()
module = interpreter.parse(module_path)
def main():
signal.signal(signal.SIGINT, SigHandler_SIGINT)
argparser = Argparser()
if argparser.args.dbg:
try:
premain(argparser)
except Exception as e:
print(e.__doc__)
            if getattr(e, 'message', None): print(e.message)
variables = globals().copy()
variables.update(locals())
shell = code.InteractiveConsole(variables)
shell.interact(banner="DEVIWASM REPL")
else:
premain(argparser)
if __name__ == "__main__":
main()
|
bloodstalker/mutator
|
bruiser/wasm/dwasm.py
|
Python
|
gpl-3.0
| 855
|
"""
This module contains celery task functions for handling the sending of bulk email
to a course.
"""
import re
import random
import json
from time import sleep
from dogapi import dog_stats_api
from smtplib import SMTPServerDisconnected, SMTPDataError, SMTPConnectError, SMTPException
from boto.ses.exceptions import (
SESAddressNotVerifiedError,
SESIdentityNotVerifiedError,
SESDomainNotConfirmedError,
SESAddressBlacklistedError,
SESDailyQuotaExceededError,
SESMaxSendingRateExceededError,
SESDomainEndsWithDotError,
SESLocalAddressCharacterError,
SESIllegalAddressError,
)
from boto.exception import AWSConnectionError
from celery import task, current_task
from celery.utils.log import get_task_logger
from celery.states import SUCCESS, FAILURE, RETRY
from celery.exceptions import RetryTaskError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives, get_connection
from django.core.urlresolvers import reverse
from bulk_email.models import (
CourseEmail, Optout, CourseEmailTemplate,
SEND_TO_MYSELF, SEND_TO_ALL, TO_OPTIONS,
)
from courseware.courses import get_course, course_image_url
from student.roles import CourseStaffRole, CourseInstructorRole
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
SubtaskStatus,
queue_subtasks_for_query,
check_subtask_is_valid,
update_subtask_status,
)
log = get_task_logger(__name__)
# Errors that mean an individual email failed to send, and should simply
# be treated as a failure for that single recipient.
SINGLE_EMAIL_FAILURE_ERRORS = (
SESAddressBlacklistedError, # Recipient's email address has been temporarily blacklisted.
SESDomainEndsWithDotError, # Recipient's email address' domain ends with a period/dot.
SESIllegalAddressError, # Raised when an illegal address is encountered.
SESLocalAddressCharacterError, # An address contained a control or whitespace character.
)
# Exceptions that, if caught, should cause the task to be re-tried.
# These errors will be caught a limited number of times before the task fails.
LIMITED_RETRY_ERRORS = (
SMTPConnectError,
SMTPServerDisconnected,
AWSConnectionError,
)
# Errors that indicate that a mailing task should be retried without limit.
# An example is if email is being sent too quickly, but may succeed if sent
# more slowly. When caught by a task, it triggers an exponential backoff and retry.
# Retries happen continuously until the email is sent.
# Note that the SMTPDataErrors here are only those within the 4xx range.
# Those not in this range (i.e. in the 5xx range) are treated as hard failures
# and thus like SINGLE_EMAIL_FAILURE_ERRORS.
INFINITE_RETRY_ERRORS = (
SESMaxSendingRateExceededError, # Your account's requests/second limit has been exceeded.
SMTPDataError,
)
# Errors that are known to indicate an inability to send any more emails,
# and should therefore not be retried. For example, exceeding a quota for emails.
# Also, any SMTP errors that are not explicitly enumerated above.
BULK_EMAIL_FAILURE_ERRORS = (
SESAddressNotVerifiedError, # Raised when a "Reply-To" address has not been validated in SES yet.
SESIdentityNotVerifiedError, # Raised when an identity has not been verified in SES yet.
SESDomainNotConfirmedError, # Raised when domain ownership is not confirmed for DKIM.
SESDailyQuotaExceededError, # 24-hour allotment of outbound email has been exceeded.
SMTPException,
)
def _get_recipient_queryset(user_id, to_option, course_id, course_location):
"""
Returns a query set of email recipients corresponding to the requested to_option category.
`to_option` is either SEND_TO_MYSELF, SEND_TO_STAFF, or SEND_TO_ALL.
Recipients who are in more than one category (e.g. enrolled in the course and are staff or self)
will be properly deduped.
"""
if to_option not in TO_OPTIONS:
log.error("Unexpected bulk email TO_OPTION found: %s", to_option)
raise Exception("Unexpected bulk email TO_OPTION found: {0}".format(to_option))
if to_option == SEND_TO_MYSELF:
recipient_qset = User.objects.filter(id=user_id)
else:
staff_qset = CourseStaffRole(course_location).users_with_role()
instructor_qset = CourseInstructorRole(course_location).users_with_role()
recipient_qset = staff_qset | instructor_qset
if to_option == SEND_TO_ALL:
# We also require students to have activated their accounts to
# provide verification that the provided email address is valid.
enrollment_qset = User.objects.filter(
is_active=True,
courseenrollment__course_id=course_id,
courseenrollment__is_active=True
)
recipient_qset = recipient_qset | enrollment_qset
recipient_qset = recipient_qset.distinct()
recipient_qset = recipient_qset.order_by('pk')
return recipient_qset
def _get_course_email_context(course):
"""
Returns context arguments to apply to all emails, independent of recipient.
"""
course_id = course.id
course_title = course.display_name
course_url = 'https://{}{}'.format(
settings.SITE_NAME,
reverse('course_root', kwargs={'course_id': course_id})
)
image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))
email_context = {
'course_title': course_title,
'course_url': course_url,
'course_image_url': image_url,
'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),
'platform_name': settings.PLATFORM_NAME,
}
return email_context
def perform_delegate_email_batches(entry_id, course_id, task_input, action_name):
"""
Delegates emails by querying for the list of recipients who should
get the mail, chopping up into batches of no more than settings.BULK_EMAIL_EMAILS_PER_TASK
in size, and queueing up worker jobs.
"""
entry = InstructorTask.objects.get(pk=entry_id)
# Get inputs to use in this task from the entry.
user_id = entry.requester.id
task_id = entry.task_id
    # Sanity check: course_id is passed in redundantly (for the convenience of
    # other task code that doesn't need the entry_id), so verify it matches the entry.
if course_id != entry.course_id:
format_msg = u"Course id conflict: explicit value {} does not match task value {}"
log.warning("Task %s: %s", task_id, format_msg.format(course_id, entry.course_id))
raise ValueError("Course id conflict: explicit value does not match task value")
# Fetch the CourseEmail.
email_id = task_input['email_id']
try:
email_obj = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist:
# The CourseEmail object should be committed in the view function before the task
# is submitted and reaches this point.
log.warning("Task %s: Failed to get CourseEmail with id %s", task_id, email_id)
raise
# Check to see if email batches have already been defined. This seems to
# happen sometimes when there is a loss of connection while a task is being
# queued. When this happens, the same task gets called again, and a whole
# new raft of subtasks gets queued up. We will assume that if subtasks
# have already been defined, there is no need to redefine them below.
# So we just return right away. We don't raise an exception, because we want
# the current task to be marked with whatever it had been marked with before.
if len(entry.subtasks) > 0 and len(entry.task_output) > 0:
log.warning("Task %s has already been processed for email %s! InstructorTask = %s", task_id, email_id, entry)
progress = json.loads(entry.task_output)
return progress
# Sanity check that course for email_obj matches that of the task referencing it.
if course_id != email_obj.course_id:
format_msg = u"Course id conflict: explicit value {} does not match email value {}"
log.warning("Task %s: %s", task_id, format_msg.format(course_id, entry.course_id))
raise ValueError("Course id conflict: explicit value does not match email value")
# Fetch the course object.
try:
course = get_course(course_id)
except ValueError:
log.exception("Task %s: course not found: %s", task_id, course_id)
raise
# Get arguments that will be passed to every subtask.
to_option = email_obj.to_option
global_email_context = _get_course_email_context(course)
def _create_send_email_subtask(to_list, initial_subtask_status):
"""Creates a subtask to send email to a given recipient list."""
subtask_id = initial_subtask_status.task_id
new_subtask = send_course_email.subtask(
(
entry_id,
email_id,
to_list,
global_email_context,
initial_subtask_status.to_dict(),
),
task_id=subtask_id,
routing_key=settings.BULK_EMAIL_ROUTING_KEY,
)
return new_subtask
recipient_qset = _get_recipient_queryset(user_id, to_option, course_id, course.location)
recipient_fields = ['profile__name', 'email']
log.info(u"Task %s: Preparing to queue subtasks for sending emails for course %s, email %s, to_option %s",
task_id, course_id, email_id, to_option)
progress = queue_subtasks_for_query(
entry,
action_name,
_create_send_email_subtask,
recipient_qset,
recipient_fields,
settings.BULK_EMAIL_EMAILS_PER_QUERY,
settings.BULK_EMAIL_EMAILS_PER_TASK
)
# We want to return progress here, as this is what will be stored in the
# AsyncResult for the parent task as its return value.
# The AsyncResult will then be marked as SUCCEEDED, and have this return value as its "result".
# That's okay, for the InstructorTask will have the "real" status, and monitoring code
# should be using that instead.
return progress
@task(default_retry_delay=settings.BULK_EMAIL_DEFAULT_RETRY_DELAY, max_retries=settings.BULK_EMAIL_MAX_RETRIES) # pylint: disable=E1102
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
"""
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status_dict` : dict containing values representing current status. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
Most values will be zero on initial call, but may be different when the task is
invoked as part of a retry.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html. Updates InstructorTask object
with status information (sends, failures, skips) and updates number of subtasks completed.
"""
subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
current_task_id = subtask_status.task_id
num_to_send = len(to_list)
log.info("Preparing to send email %s to %d recipients as subtask %s for instructor task %d: context = %s, status=%s",
email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)
# Check that the requested subtask is actually known to the current InstructorTask entry.
# If this fails, it throws an exception, which should fail this subtask immediately.
# This can happen when the parent task has been run twice, and results in duplicate
# subtasks being created for the same InstructorTask entry. This can happen when Celery
# loses its connection to its broker, and any current tasks get requeued.
# We hope to catch this condition in perform_delegate_email_batches() when it's the parent
# task that is resubmitted, but just in case we fail to do so there, we check here as well.
# There is also a possibility that this task will be run twice by Celery, for the same reason.
# To deal with that, we need to confirm that the task has not already been completed.
check_subtask_is_valid(entry_id, current_task_id, subtask_status)
send_exception = None
new_subtask_status = None
try:
course_title = global_email_context['course_title']
with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
new_subtask_status, send_exception = _send_course_email(
entry_id,
email_id,
to_list,
global_email_context,
subtask_status,
)
except Exception:
# Unexpected exception. Try to write out the failure to the entry before failing.
log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
# We got here for really unexpected reasons. Since we don't know how far
# the task got in emailing, we count all recipients as having failed.
# It at least keeps the counts consistent.
subtask_status.increment(failed=num_to_send, state=FAILURE)
update_subtask_status(entry_id, current_task_id, subtask_status)
raise
if send_exception is None:
# Update the InstructorTask object that is storing its progress.
log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
elif isinstance(send_exception, RetryTaskError):
# If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the progress made before the retry condition
# was encountered has already been updated before the retry call was made,
# so we only log here.
log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
raise send_exception # pylint: disable=E0702
else:
log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
update_subtask_status(entry_id, current_task_id, new_subtask_status)
raise send_exception # pylint: disable=E0702
# return status in a form that can be serialized by Celery into JSON:
log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
return new_subtask_status.to_dict()
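# Illustrative shape of the ``subtask_status_dict`` argument described above
# (hypothetical values):
#   {'task_id': 'abc-123', 'attempted': 10, 'succeeded': 9, 'skipped': 0,
#    'failed': 1, 'retried_nomax': 0, 'retried_withmax': 1, 'state': 'RETRY'}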
def _filter_optouts_from_recipients(to_list, course_id):
"""
Filters a recipient list based on student opt-outs for a given course.
Returns the filtered recipient list, as well as the number of optouts
removed from the list.
"""
optouts = Optout.objects.filter(
course_id=course_id,
user__in=[i['pk'] for i in to_list]
).values_list('user__email', flat=True)
optouts = set(optouts)
# Only count the num_optout for the first time the optouts are calculated.
# We assume that the number will not change on retries, and so we don't need
# to calculate it each time.
num_optout = len(optouts)
to_list = [recipient for recipient in to_list if recipient['email'] not in optouts]
return to_list, num_optout
def _get_source_address(course_id, course_title):
"""
Calculates an email address to be used as the 'from-address' for sent emails.
Makes a unique from name and address for each course, e.g.
"COURSE_TITLE" Course Staff <coursenum-no-reply@courseupdates.edx.org>
"""
course_title_no_quotes = re.sub(r'"', '', course_title)
# The course_id is assumed to be in the form 'org/course_num/run',
# so pull out the course_num. Then make sure that it can be used
    # in an email address, by substituting a '_' anywhere a character that is
    # not a word character, period, or dash appears.
course_num = course_id.split('/')[1]
invalid_chars = re.compile(r"[^\w.-]")
course_num = invalid_chars.sub('_', course_num)
from_addr = u'"{0}" Course Staff <{1}-{2}>'.format(course_title_no_quotes, course_num, settings.BULK_EMAIL_DEFAULT_FROM_EMAIL)
return from_addr
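# A worked example of the construction above (hypothetical values, and assuming
# settings.BULK_EMAIL_DEFAULT_FROM_EMAIL == 'no-reply@courseupdates.edx.org'):
#   course_id    = 'MITx/6.002x/2013_Spring'
#   course_title = 'Circuits and "Electronics"'
#   -> course_num = '6.002x'  (only characters outside [\w.-] become '_')
#   -> from_addr  = '"Circuits and Electronics" Course Staff <6.002x-no-reply@courseupdates.edx.org>'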
def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):
"""
Performs the email sending task.
Sends an email to a list of recipients.
Inputs are:
* `entry_id`: id of the InstructorTask object to which progress should be recorded.
* `email_id`: id of the CourseEmail model that is to be emailed.
* `to_list`: list of recipients. Each is represented as a dict with the following keys:
- 'profile__name': full name of User.
- 'email': email address of User.
- 'pk': primary key of User model.
* `global_email_context`: dict containing values that are unique for this email but the same
for all recipients of this email. This dict is to be used to fill in slots in email
template. It does not include 'name' and 'email', which will be provided by the to_list.
* `subtask_status` : object of class SubtaskStatus representing current status.
Sends to all addresses contained in to_list that are not also in the Optout table.
Emails are sent multi-part, in both plain text and html.
Returns a tuple of two values:
* First value is a SubtaskStatus object which represents current progress at the end of this call.
* Second value is an exception returned by the innards of the method, indicating a fatal error.
In this case, the number of recipients that were not sent have already been added to the
'failed' count above.
"""
# Get information from current task's request:
task_id = subtask_status.task_id
try:
course_email = CourseEmail.objects.get(id=email_id)
except CourseEmail.DoesNotExist as exc:
log.exception("Task %s: could not find email id:%s to send.", task_id, email_id)
raise
# Exclude optouts (if not a retry):
# Note that we don't have to do the optout logic at all if this is a retry,
# because we have presumably already performed the optout logic on the first
# attempt. Anyone on the to_list on a retry has already passed the filter
# that existed at that time, and we don't need to keep checking for changes
# in the Optout list.
if subtask_status.get_retry_count() == 0:
to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)
subtask_status.increment(skipped=num_optout)
course_title = global_email_context['course_title']
subject = "[" + course_title + "] " + course_email.subject
from_addr = _get_source_address(course_email.course_id, course_title)
course_email_template = CourseEmailTemplate.get_template()
try:
connection = get_connection()
connection.open()
# Define context values to use in all course emails:
email_context = {'name': '', 'email': ''}
email_context.update(global_email_context)
while to_list:
# Update context with user-specific values from the user at the end of the list.
# At the end of processing this user, they will be popped off of the to_list.
# That way, the to_list will always contain the recipients remaining to be emailed.
# This is convenient for retries, which will need to send to those who haven't
# yet been emailed, but not send to those who have already been sent to.
current_recipient = to_list[-1]
email = current_recipient['email']
email_context['email'] = email
email_context['name'] = current_recipient['profile__name']
# Construct message content using templates and context:
plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)
html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)
# Create email:
email_msg = EmailMultiAlternatives(
subject,
plaintext_msg,
from_addr,
[email],
connection=connection
)
email_msg.attach_alternative(html_msg, 'text/html')
            # Throttle if we have previously hit the rate limiter. This is not very high-tech,
# but if a task has been retried for rate-limiting reasons, then we sleep
# for a period of time between all emails within this task. Choice of
# the value depends on the number of workers that might be sending email in
# parallel, and what the SES throttle rate is.
if subtask_status.retried_nomax > 0:
sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
try:
log.debug('Email with id %s to be sent to %s', email_id, email)
with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):
connection.send_messages([email_msg])
except SMTPDataError as exc:
# According to SMTP spec, we'll retry error codes in the 4xx range. 5xx range indicates hard failure.
if exc.smtp_code >= 400 and exc.smtp_code < 500:
# This will cause the outer handler to catch the exception and retry the entire task.
raise exc
else:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
except SINGLE_EMAIL_FAILURE_ERRORS as exc:
# This will fall through and not retry the message.
log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
subtask_status.increment(failed=1)
else:
dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])
if settings.BULK_EMAIL_LOG_SENT_EMAILS:
log.info('Email with id %s sent to %s', email_id, email)
else:
log.debug('Email with id %s sent to %s', email_id, email)
subtask_status.increment(succeeded=1)
# Pop the user that was emailed off the end of the list only once they have
# successfully been processed. (That way, if there were a failure that
# needed to be retried, the user is still on the list.)
to_list.pop()
except INFINITE_RETRY_ERRORS as exc:
dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_nomax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_nomax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True
)
except LIMITED_RETRY_ERRORS as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# Errors caught are those that indicate a temporary condition that might succeed on retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
except BULK_EMAIL_FAILURE_ERRORS as exc:
dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])
num_pending = len(to_list)
log.exception('Task %s: email with id %d caused send_course_email task to fail with "fatal" exception. %d emails unsent.',
task_id, email_id, num_pending)
# Update counters with progress to date, counting unsent emails as failures,
# and set the state to FAILURE:
subtask_status.increment(failed=num_pending, state=FAILURE)
return subtask_status, exc
except Exception as exc:
# Errors caught here cause the email to be retried. The entire task is actually retried
# without popping the current recipient off of the existing list.
# These are unexpected errors. Since they might be due to a temporary condition that might
# succeed on retry, we give them a retry.
dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])
log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',
task_id, email_id)
# Increment the "retried_withmax" counter, update other counters with progress to date,
# and set the state to RETRY:
subtask_status.increment(retried_withmax=1, state=RETRY)
return _submit_for_retry(
entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False
)
else:
# All went well. Update counters with progress to date,
# and set the state to SUCCESS:
subtask_status.increment(state=SUCCESS)
# Successful completion is marked by an exception value of None.
return subtask_status, None
finally:
# Clean up at the end.
connection.close()
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
def _submit_for_retry(entry_id, email_id, to_list, global_email_context, current_exception, subtask_status, skip_retry_max=False):
"""
Helper function to requeue a task for retry, using the new version of arguments provided.
Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
These include the `current_exception` that the task encountered that is causing the retry attempt,
and the `subtask_status` that is to be returned. A third extra argument `skip_retry_max`
indicates whether the current retry should be subject to a maximum test.
Returns a tuple of two values:
* First value is a dict which represents current progress. Keys are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
* Second value is an exception returned by the innards of the method. If the retry was
successfully submitted, this value will be the RetryTaskError that retry() returns.
Otherwise, it (ought to be) the current_exception passed in.
"""
task_id = subtask_status.task_id
log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)
# Calculate time until we retry this task (in seconds):
# The value for max_retries is increased by the number of times an "infinite-retry" exception
# has been retried. We want the regular retries to trigger max-retry checking, but not these
# special retries. So we count them separately.
max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
base_delay = _get_current_task().default_retry_delay
if skip_retry_max:
# once we reach five retries, don't increase the countdown further.
retry_index = min(subtask_status.retried_nomax, 5)
exception_type = 'sending-rate'
# if we have a cap, after all, apply it now:
if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
max_retries = min(max_retries, retry_cap)
else:
retry_index = subtask_status.retried_withmax
exception_type = 'transient'
# Skew the new countdown value by a random factor, so that not all
# retries are deferred by the same amount.
countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)
log.warning('Task %s: email with id %d not delivered due to %s error %s, retrying send to %d recipients in %s seconds (with max_retry=%s)',
task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)
# we make sure that we update the InstructorTask with the current subtask status
# *before* actually calling retry(), to be sure that there is no race
# condition between this update and the update made by the retried task.
update_subtask_status(entry_id, task_id, subtask_status)
# Now attempt the retry. If it succeeds, it returns a RetryTaskError that
# needs to be returned back to Celery. If it fails, we return the existing
# exception.
try:
send_course_email.retry(
args=[
entry_id,
email_id,
to_list,
global_email_context,
subtask_status.to_dict(),
],
exc=current_exception,
countdown=countdown,
max_retries=max_retries,
throw=True,
)
except RetryTaskError as retry_error:
# If the retry call is successful, update with the current progress:
log.exception('Task %s: email with id %d caused send_course_email task to retry.',
task_id, email_id)
return subtask_status, retry_error
except Exception as retry_exc:
# If there are no more retries, because the maximum has been reached,
# we expect the original exception to be raised. We catch it here
# (and put it in retry_exc just in case it's different, but it shouldn't be),
# and update status as if it were any other failure. That means that
# the recipients still in the to_list are counted as failures.
log.exception('Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
task_id, email_id, [i['email'] for i in to_list])
num_failed = len(to_list)
        subtask_status.increment(failed=num_failed, state=FAILURE)
return subtask_status, retry_exc
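# A worked example of the backoff above (hypothetical numbers): with
# base_delay = 15 seconds and retry_index = 3, the base countdown is
# (2 ** 3) * 15 = 120 seconds, which the random jitter factor then spreads
# over the range [90, 150] seconds.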
def _statsd_tag(course_title):
"""
Calculate the tag we will use for DataDog.
"""
tag = u"course_email:{0}".format(course_title)
return tag[:200]
|
pku9104038/edx-platform
|
lms/djangoapps/bulk_email/tasks.py
|
Python
|
agpl-3.0
| 33,371
|
from guess_language import guess_language
from FilterHelper import removeLinks
class LanguageFilter:
def __init__(self, language):
"""The language should be a ISO 639-1 code of a language supported by https://bitbucket.org/spirit/guess_language"""
self.language = language
def filterTweet(self, data):
return guess_language(removeLinks(data['text'])) == self.language
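# A minimal usage sketch (assumptions: guess_language and FilterHelper are
# importable alongside this module, and a tweet is any dict with a 'text' key):
if __name__ == '__main__':
    english_filter = LanguageFilter('en')
    sample_tweet = {'text': 'Just adopted a cat, photos at http://example.com'}
    print(english_filter.filterTweet(sample_tweet))  # True when the text is detected as English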
|
JoelHoskin/CatHack
|
LanguageFilter.py
|
Python
|
mit
| 378
|
# -*- coding: utf-8 -*-
"""
A crawler that fetches a user's Douban data so the user can keep a local backup.
Supports:
1. Douban movies; Douban books are not yet supported.
2. The output is a comma-separated CSV file.
@author: DannyVim
"""
import urllib2 as ur
from bs4 import BeautifulSoup as bs
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
# BASE URL
def basepage(wa):
m_wish = 'http://movie.douban.com/people/' + user + '/wish?start='
m_do = 'http://movie.douban.com/people/' + user + '/do?start='
m_collect = 'http://movie.douban.com/people/' + user + '/collect?start='
if wa == 'do':
baseurl = m_do
elif wa == 'wish':
baseurl = m_wish
elif wa == 'collect':
baseurl = m_collect
link_list(baseurl)
# Find out how many pages the listing has, then open each page to fetch its data
def link_list(pageurl):
info = ur.urlopen(pageurl)
soup = bs(info)
try:
t = soup.find('span', class_='thispage')['data-total-page']
except TypeError:
content(pageurl)
else:
n = 0
t = int(t) - 1
for i in range(t):
pagelist = pageurl + str(n)
content(pagelist)
n = n + 15
            # Show the progress of the run; this only works in a terminal (CMD)
percent = 1.0 * i / t * 100
print 'complete percent:' + str(percent) + '%',
sys.stdout.write("\r")
time.sleep(0.1)
# Use bs4 to parse the static page and pick out the useful fields
def content(html):
info = ur.urlopen(html)
soup = bs(info)
for tag in soup.body(attrs={'class': 'item'}):
datum = open('datum.csv', 'a+')
title = tag.em.string.strip()
url = tag.li.a.get('href')
date = tag.find('span', class_='date').get_text()
comment = tag.find('span', class_='comment')
        if comment is None:
comment = ''
else:
comment = comment.get_text()
comment = comment.encode('utf-8')
title = title.encode('utf-8')
url = url.encode('utf-8')
date = date.encode('utf-8')
print >> datum, url, ',', date, ',', title, ',', comment
datum.close()
# Run
print u'This is a crawler that fetches your Douban data so you can keep a local backup.'
user = raw_input('Please input your DB user name:')
wanted = raw_input('Please input what you want to sync:(do,wish,collect)')
basepage(wanted)
|
DannyVim/ToolsCollection
|
Outdated/db_movie.py
|
Python
|
gpl-2.0
| 2,485
|
import RPi.GPIO as GPIO
import time
import numpy as np
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
red_led = 17 # Red LED is in GPIO 17
yellow_led = 27 # Yellow LED is in GPIO 27
green_led = 22 # Green LED is in GPIO 22
red_button = 14 # Red Button is in GPIO 14
yellow_button = 15 # Yellow Button is in GPIO 15
green_button = 23 # Green Button is in GPIO 23
rgb_red = 10 # RGB LED red in GPIO 10
rgb_green = 9 # RGB LED green in GPIO 9
rgb_blue = 11 # RGB LED blue in GPIO 11
buzzer = 24 # Buzzer in GPIO 24
# Setup GPIO OUT for LEDs
GPIO.setup(red_led, GPIO.OUT)
GPIO.setup(yellow_led, GPIO.OUT)
GPIO.setup(green_led, GPIO.OUT)
# Setup GPIO IN & PUD_UP for Buttons
GPIO.setup(red_button, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(yellow_button, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(green_button, GPIO.IN, GPIO.PUD_UP)
# Setup GPIO for RGB LED
GPIO.setup(rgb_red, GPIO.OUT)
GPIO.setup(rgb_green, GPIO.OUT)
GPIO.setup(rgb_blue, GPIO.OUT)
# Setup PWM for RGB LED, Frequency is 200Hz
red_PWM = GPIO.PWM(rgb_red, 200)
green_PWM = GPIO.PWM(rgb_green, 200)
blue_PWM = GPIO.PWM(rgb_blue, 200)
# Setup PWM for Buzzer
GPIO.setup(buzzer, GPIO.OUT)
buzz_PWM = GPIO.PWM(buzzer, 0.001) # Near-zero frequency by default (0 Hz is not allowed)
# Music tune define
# Tune list for playing
tune1 = [262,294,330,349,392,440,494,523, 587, 659,698,784,880,988,1047]
# Other param
current_mark = 3 # Default C-mark
duration = 0.1 # Sound duration
def led_on(led):
GPIO.output(led, 1)
def led_off(led):
GPIO.output(led, 0)
def led_reverse(led):
if GPIO.input(led) == 1:
GPIO.output(led, 0)
else:
GPIO.output(led, 1)
def red_reverse(channel):
led_reverse(red_led)
def yellow_reverse(channel):
led_reverse(yellow_led)
def green_reverse(channel):
led_reverse(green_led)
# Detects rising edge on button. ignores multiple rising edges in 200ms
GPIO.add_event_detect(red_button, GPIO.RISING, callback=red_reverse, bouncetime=200)
GPIO.add_event_detect(yellow_button, GPIO.RISING, callback=yellow_reverse, bouncetime=200)
GPIO.add_event_detect(green_button, GPIO.RISING, callback=green_reverse, bouncetime=200)
def btn_click(btn):
if GPIO.input(btn) == False:
return True
else:
return False
def flash(r,y,g,n):
for i in xrange(0,n):
print "3"
led_on(red_led)
time.sleep(r) # Red time
led_off(red_led)
print "2"
led_on(yellow_led)
time.sleep(y) # Yellow time
led_off(yellow_led)
print "1"
led_on(green_led)
time.sleep(g) # Green time
led_off(green_led)
def setRGB(r, g, b):
r_v = r / 2.55
g_v = g / 2.55
b_v = b / 2.55
red_PWM.ChangeDutyCycle(r_v)
green_PWM.ChangeDutyCycle(g_v)
blue_PWM.ChangeDutyCycle(b_v)
def gradient(t):
for (x,y) in zip(xrange(1,255), xrange(255,1,-1)):
setRGB(y, x, 0) # red -> green
time.sleep(t)
for (x,y) in zip(xrange(1,255), xrange(255,1,-1)):
setRGB(0, y, x) # green -> blue
time.sleep(t)
for (x,y) in zip(xrange(1,255), xrange(255,1,-1)):
setRGB(x, 0, y) # blue -> red
time.sleep(t)
def buzz(pitch, duration):
if(pitch==0):
time.sleep(duration)
return
period = 1.0 / pitch #in physics, the period (sec/cyc) is the inverse of the frequency (cyc/sec)
delay = period / 2 #calcuate the time for half of the wave
cycles = int(duration * pitch) #the number of waves to produce is the duration times the frequency
for i in xrange(cycles):
GPIO.output(buzzer, 1)
time.sleep(delay)
GPIO.output(buzzer, 0)
time.sleep(delay)
def play(tune):
for p in tune:
buzz(p, duration) #feed the pitch and duration to the function, "buzz"
time.sleep(duration *0.5)
for p in reversed(tune):
buzz(p, duration)
time.sleep(duration *0.5)
# Define the music note, with mark(A to 1,B to 2...) and pitch (1-7)
def note(mark, pitch):
    if (mark > 0 and mark < 8 and pitch > 0 and pitch < 8):
mark-=1
pitch-=1
else:
return 0 # check for mark and pitch input
    return tuneList[mark, pitch] # tuneList: 2-D table of note frequencies, expected to be defined elsewhere
def upTune():
    global current_mark
    current_mark = 1 if current_mark > 6 else current_mark + 1
def downTune():
    global current_mark
    current_mark = 7 if current_mark < 2 else current_mark - 1
def start():
print "Let's begin! Count 3 seconds..."
    flash(1,1,1,1) # flash red-yellow-green LEDs once
    red_PWM.start(0)
    green_PWM.start(0)
    blue_PWM.start(0)
buzz_PWM.start(50)
print "Start ! Click button and play"
def stop():
red_PWM.stop()
green_PWM.stop()
blue_PWM.stop()
print "Stop"
GPIO.cleanup()
def main():
try:
start()
while True:
gradient(0.005)
play(tune1)
except KeyboardInterrupt:
stop()
main()
|
lizhuoli1126/MarkdownScript
|
Raspberry Pi/reaction.py
|
Python
|
mit
| 4,439
|
#!/usr/bin/python
import ansible.runner
import ansible.playbook
import ansible.inventory
from ansible import callbacks
from ansible import utils
import json
# the fastest way to set up the inventory
# hosts list
hosts = ["127.0.0.1"]
# set up the inventory, if no group is defined then 'all' group is used by default
example_inventory = ansible.inventory.Inventory(hosts)
pm = ansible.runner.Runner(
module_name = 'command',
module_args = 'uname -a',
timeout = 5,
inventory = example_inventory,
subset = 'all', # name of the hosts group
private_key_file = "Host's private key",
remote_user = 'user to login with, root is default user',
remote_pass = 'user password'
)
out = pm.run()
print json.dumps(out, sort_keys=True, indent=4, separators=(',', ': '))
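# Illustrative note (editor's addition): with ansible 1.x, Runner.run() returns a dict of
# the form {'contacted': {host: result, ...}, 'dark': {host: error, ...}}, so the JSON dump
# above lists per-host results for reachable hosts under 'contacted'.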
|
oriolrius/programming-ansible-basics
|
test_modules.py
|
Python
|
mit
| 798
|
# -*- coding: utf-8 -*-
"""
flask_konch
~~~~~~~~~~~
    An improved shell command for the Flask CLI.
:copyright: (c) 2017 by Steven Loria
:license: MIT, see LICENSE for more details.
"""
__version__ = '1.2.0.post0'
__author__ = 'Steven Loria'
__license__ = 'MIT'
__all__ = [
'EXTENSION_NAME',
]
EXTENSION_NAME = 'flask-konch'
|
sbhtw/flask-konch
|
flask_konch/__init__.py
|
Python
|
mit
| 351
|
import os
import sys
from flask import Flask, Response
from flask.ext.cors import CORS
from google.protobuf.descriptor_pb2 import FileDescriptorSet
from google.protobuf.json_format import MessageToJson
base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
path = os.path.join(base_path, 'pb')
sys.path.append(os.path.normpath(path))
app = Flask(__name__)
CORS(app)
bundle_path = os.path.join(base_path, 'proto_bundle')
local_path = os.path.join('./', 'proto_bundle')
@app.route('/google/proto-bundle')
def fetchGoogleProtos():
raw_json = fetchProtoJSON(app.config['proto-google'])
resp = Response(
response=raw_json,
status=200,
mimetype="application/json"
)
return resp
@app.route('/local/proto-bundle')
def fetchLocalProtos():
raw_json = fetchProtoJSON(app.config['proto-bundle'])
resp = Response(
response=raw_json,
status=200,
mimetype="application/json"
)
return resp
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/<path:path>')
def catch_all(path):
return app.send_static_file('index.html')
def fetchProtoJSON(the_path):
with open(the_path, 'rb') as f:
raw_proto = FileDescriptorSet()
raw_proto.ParseFromString(f.read())
f.close()
return MessageToJson(raw_proto)
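# --- Illustrative run sketch (editor's addition, not part of the original module) ---
# The bundle file names are placeholders; each config key must point at a serialized
# FileDescriptorSet (e.g. produced with protoc --descriptor_set_out).
if __name__ == '__main__':
    app.config['proto-bundle'] = os.path.join(bundle_path, 'bundle.pb')  # placeholder path
    app.config['proto-google'] = os.path.join(bundle_path, 'google.pb')  # placeholder path
    app.run(host='0.0.0.0', port=5000, debug=True)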
|
opendoor-labs/pilgrim3
|
pilgrim3/app.py
|
Python
|
mit
| 1,402
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import subprocess
import thread
import click
from concurrent.futures import ProcessPoolExecutor, as_completed
from dateutil.parser import parse as parse_date
from elasticsearch import Elasticsearch, helpers, RequestsHttpConnection
import jsonschema
import sqlite3
import yaml
from c7n.credentials import assumed_session, SessionFactory
from c7n.executor import ThreadPoolExecutor
from c7n.utils import local_session
log = logging.getLogger('c7n.trailes')
CONFIG_SCHEMA = {
'type': 'object',
'additionalProperties': True,
'properties': {
'indexer': {
'type': 'object',
'required': ['host', 'port', 'idx_name'],
'properties': {
'host': {'type': 'string'},
'port': {'type': 'number'},
'user': {'type': 'string'},
'password': {'type': 'string'},
'idx_name': {'type': 'string'},
'query': {'type': 'string'}
},
'additionalProperties': True
},
'accounts': {
'type': 'array',
'items': {
'type': 'object',
'anyOf': [
{"required": ['profile']},
{"required": ['role']}
],
'required': ['name', 'bucket', 'regions', 'title'],
'properties': {
'name': {'type': 'string'},
'title': {'type': 'string'},
'tags': {'type': 'object'},
'bucket': {'type': 'string'},
'regions': {'type': 'array', 'items': {'type': 'string'}}
}
}
}
}
}
def get_es_client(config):
host = [config['indexer'].get('host', 'localhost')]
es_kwargs = {}
es_kwargs['connection_class'] = RequestsHttpConnection
user = config['indexer'].get('user', False)
password = config['indexer'].get('password', False)
if user and password:
es_kwargs['http_auth'] = (user, password)
es_kwargs['port'] = config['indexer'].get('port', 9200)
return Elasticsearch(host, **es_kwargs)
def index_events(client, events):
results = helpers.streaming_bulk(client, events)
for status, r in results:
if not status:
log.debug("index err result %s", r)
def dict_factory(cursor, row):
"""Returns a sqlite row factory that returns a dictionary"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def fetch_events(cursor, config, account_name):
"""Generator that returns the events"""
query = config['indexer'].get('query',
'select * from events where user_agent glob \'*CloudCustodian*\'')
for event in cursor.execute(query):
event['account'] = account_name
event['_index'] = config['indexer']['idx_name']
event['_type'] = config['indexer'].get('idx_type', 'traildb')
yield event
def get_traildb(bucket, key, session_factory, directory):
local_db_file = directory + "/traildb" + \
str(thread.get_ident())
local_bz2_file = local_db_file + '.bz2'
s3 = local_session(session_factory).resource('s3')
s3.Bucket(bucket).download_file(key['Key'], local_bz2_file)
# Decompress the traildb file
# (use the system bunzip2 command because it's faster than the python bzip2 library)
subprocess.call(['bunzip2', '-f', local_bz2_file])
return local_db_file
def valid_date(key, config_date):
""" traildb bucket folders are not zero-padded so this validation
checks that the keys returned by the paginator are
*really after* the config date
"""
key_date = "/".join(key.split("/")[4:7])
return parse_date(key_date) > parse_date(config_date)
def index_account_trails(config, account, region, date, directory):
es_client = get_es_client(config)
s3 = local_session(
lambda: SessionFactory(region, profile=account.get('profile'),
assume_role=account.get('role'))()).client('s3')
bucket = account['bucket']
key_prefix = "accounts/{}/{}/traildb".format(account['name'], region)
marker = "{}/{}/trail.db.bz2".format(key_prefix, date)
p = s3.get_paginator('list_objects_v2').paginate(
Bucket=bucket,
Prefix=key_prefix,
StartAfter=marker,
)
with ThreadPoolExecutor(max_workers=20) as w:
for key_set in p:
if 'Contents' not in key_set:
continue
keys = []
for k in key_set['Contents']:
if (k['Key'].endswith('trail.db.bz2') and valid_date(k['Key'], date)):
keys.append(k)
futures = map(lambda k: w.submit(
get_traildb, bucket, k,
lambda: SessionFactory(region, profile=account.get('profile'),
assume_role=account.get('role'))(), directory),
keys)
for f in as_completed(futures):
local_db_file = f.result()
connection = sqlite3.connect(local_db_file)
connection.row_factory = dict_factory
cursor = connection.cursor()
index_events(es_client, fetch_events(cursor, config, account['name']))
connection.close()
try:
os.remove(local_db_file)
except:
log.warning("Failed to remove temporary file: {}".format(
local_db_file))
pass
def get_date_path(date, delta=0):
# optional input, use default time delta if not provided
# delta is 24 hours for trail
if not date:
date = datetime.datetime.utcnow() - datetime.timedelta(hours=delta)
elif date and not isinstance(date, datetime.datetime):
date = parse_date(date)
# note that traildb doesn't use leading zero
return date.strftime('%Y/%-m/%-d')
@click.group()
def trailes():
"""TrailDB Elastic Search"""
@trailes.command()
@click.option('-c', '--config', required=True, help="Config file")
@click.option('--date', required=False, help="Start date")
@click.option('--directory', required=False, help="Path for temp db file")
@click.option('--concurrency', default=5)
@click.option('-a', '--accounts', multiple=True)
@click.option('-t', '--tag')
@click.option('--verbose/--no-verbose', default=False)
def index(
config, date=None, directory=None, concurrency=5, accounts=None,
tag=None, verbose=False):
"""index traildbs directly from s3 for multiple accounts.
context: assumes a daily traildb file in s3 with dated key path
"""
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('c7n.worker').setLevel(logging.INFO)
with open(config) as fh:
config = yaml.safe_load(fh.read())
jsonschema.validate(config, CONFIG_SCHEMA)
date = get_date_path(date, delta=24)
directory = directory or "/tmp"
with ProcessPoolExecutor(max_workers=concurrency) as w:
futures = {}
jobs = []
for account in config.get('accounts'):
if accounts and account['name'] not in accounts:
continue
if tag:
found = False
for t in account['tags'].values():
if tag == t:
found = True
break
if not found:
continue
for region in account.get('regions'):
p = (config, account, region, date, directory)
jobs.append(p)
for j in jobs:
log.debug("submit account:{} region:{} date:{}".format(
j[1]['name'], j[2], j[3]))
futures[w.submit(index_account_trails, *j)] = j
# Process completed
for f in as_completed(futures):
config, account, region, date, directory = futures[f]
if f.exception():
log.warning("error account:{} region:{} error:{}".format(
account['name'], region, f.exception()))
continue
log.info("complete account:{} region:{}".format(
account['name'], region))
if __name__ == '__main__':
trailes(auto_envvar_prefix='TRAIL')
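# --- Illustrative config sketch (editor's addition) ---
# A minimal YAML config that satisfies CONFIG_SCHEMA above; every value is a placeholder.
#
# indexer:
#   host: localhost
#   port: 9200
#   idx_name: traildb
# accounts:
#   - name: dev-account
#     title: Dev Account
#     profile: dev
#     bucket: my-trail-bucket
#     regions: [us-east-1]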
|
jdubs/cloud-custodian
|
tools/c7n_traildb/trailes.py
|
Python
|
apache-2.0
| 9,204
|
from common import pack
class Decimator:
def __init__(self, L_b, D=1):
assert L_b % D == 0, (
'Decimation block length must be a multiple of the dec. factor')
self._D = D
self._pkr = pack.Packer(L_b)
def push(self, x):
packed = self._pkr.push(x)
if packed is not None:
            return packed[:, ::self._D]
return None
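# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assumes pack.Packer buffers pushed samples and hands back a 2-D block with L_b
# columns once one is complete (inferred from the usage above, not verified against
# the Packer implementation); the array below is made-up stand-in data.
if __name__ == '__main__':
    import numpy as np
    dec = Decimator(L_b=8, D=2)
    block = dec.push(np.arange(8, dtype=float).reshape(1, 8))
    if block is not None:
        print(block)  # every 2nd column of the completed block, e.g. [[0. 2. 4. 6.]]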
|
cuauv/software
|
hydrocode/modules/pinger/decimate.py
|
Python
|
bsd-3-clause
| 395
|
# This file is part of Pebble.
# Pebble is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
# Pebble is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with Pebble. If not, see <http://www.gnu.org/licenses/>.
import threading
from time import time
from types import MethodType
_waitforthreads_lock = threading.Lock()
def waitfortasks(tasks, timeout=None):
"""Waits for one or more *Task* to be ready or until *timeout* expires.
*tasks* is a list containing one or more *pebble.Task* objects.
If *timeout* is not None the function will block
for the specified amount of seconds.
The function returns a list containing the ready *Tasks*.
"""
lock = threading.Condition(threading.Lock())
prepare_tasks(tasks, lock)
try:
wait_tasks(tasks, lock, timeout)
finally:
reset_tasks(tasks)
return filter(lambda t: t.ready, tasks)
def prepare_tasks(tasks, lock):
"""Replaces task._set() method in order to notify the waiting Condition."""
for task in tasks:
task._pebble_lock = lock
with task._task_ready:
task._pebble_old_method = task._set
task._set = MethodType(new_method, task)
def wait_tasks(tasks, lock, timeout):
with lock:
if not any(map(lambda t: t.ready, tasks)):
lock.wait(timeout)
def reset_tasks(tasks):
"""Resets original task._set() method."""
for task in tasks:
with task._task_ready:
task._set = task._pebble_old_method
delattr(task, '_pebble_old_method')
delattr(task, '_pebble_lock')
def waitforqueues(queues, timeout=None):
"""Waits for one or more *Queue* to be ready or until *timeout* expires.
*queues* is a list containing one or more *Queue.Queue* objects.
If *timeout* is not None the function will block
for the specified amount of seconds.
The function returns a list containing the ready *Queues*.
"""
lock = threading.Condition(threading.Lock())
prepare_queues(queues, lock)
try:
wait_queues(queues, lock, timeout)
finally:
reset_queues(queues)
return filter(lambda q: not q.empty(), queues)
def prepare_queues(queues, lock):
"""Replaces queue._put() method in order to notify the waiting Condition."""
for queue in queues:
queue._pebble_lock = lock
with queue.mutex:
queue._pebble_old_method = queue._put
queue._put = MethodType(new_method, queue)
def wait_queues(queues, lock, timeout):
with lock:
if not any(map(lambda q: not q.empty(), queues)):
lock.wait(timeout)
def reset_queues(queues):
"""Resets original queue._put() method."""
for queue in queues:
with queue.mutex:
queue._put = queue._pebble_old_method
delattr(queue, '_pebble_old_method')
delattr(queue, '_pebble_lock')
def waitforthreads(threads, timeout=None):
"""Waits for one or more *Thread* to exit or until *timeout* expires.
.. note::
Expired *Threads* are not joined by *waitforthreads*.
*threads* is a list containing one or more *threading.Thread* objects.
If *timeout* is not None the function will block
for the specified amount of seconds.
The function returns a list containing the ready *Threads*.
"""
old_function = None
lock = threading.Condition(threading.Lock())
def new_function(*args):
old_function(*args)
with lock:
lock.notify_all()
old_function = prepare_threads(new_function)
try:
wait_threads(threads, lock, timeout)
finally:
reset_threads(old_function)
return filter(lambda t: not t.is_alive(), threads)
def prepare_threads(new_function):
"""Replaces threading._get_ident() function in order to notify
the waiting Condition."""
with _waitforthreads_lock:
if hasattr(threading, 'get_ident'):
old_function = threading.get_ident
threading.get_ident = new_function
else:
old_function = threading._get_ident
threading._get_ident = new_function
return old_function
def wait_threads(threads, lock, timeout):
timestamp = time()
time_left = lambda: timeout - (time() - timestamp)
with lock:
while not any(map(lambda t: not t.is_alive(), threads)):
if timeout is None:
lock.wait()
elif time_left() > 0:
lock.wait(time_left())
else:
return
def reset_threads(old_function):
"""Resets original threading._get_ident() function."""
with _waitforthreads_lock:
if hasattr(threading, 'get_ident'):
threading.get_ident = old_function
else:
threading._get_ident = old_function
def new_method(self, *args):
self._pebble_old_method(*args)
with self._pebble_lock:
self._pebble_lock.notify_all()
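# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Spawns two worker threads and waits up to one second for any of them to finish,
# using only the standard library plus waitforthreads() defined above.
if __name__ == '__main__':
    from time import sleep
    def _work(seconds):
        sleep(seconds)
    workers = [threading.Thread(target=_work, args=(s,)) for s in (0.1, 2.0)]
    for worker in workers:
        worker.start()
    finished = waitforthreads(workers, timeout=1.0)
    print([t.name for t in finished])  # only the quick worker should be listed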
|
villind/pebble
|
pebble/functions.py
|
Python
|
lgpl-3.0
| 5,408
|
"""
This module sends messages to the Facebook servers, which in turn deliver them to the user whose
user_id is passed.
Messages can be pure text or other types such as images, videos, locations, and some special ones
from Facebook, e.g. templates.
"""
import os
from .exception import raise_error, QuickReplyCountExceeded, ElementCountExceeded, CharacterCountExceeded, \
ButtonCountExceeded
try:
import ujson as json
except ImportError:
import json # type: ignore
import requests
import logging
logger = logging.getLogger(__name__)
headers = {"Content-Type": "application/json"}
class Send:
def __init__(self, page_access_token, api_ver=None):
"""
Initialize the send class with page_access_token and api_version(optional) so that you can use the obtained
instance to send messages of different types
@required
:param page_access_token: The page access token for the bot
@optional
:type page_access_token: str
:param api_ver: api version you want to use
:type api_ver: int,float
"""
if api_ver:
assert isinstance(api_ver, (int, float)), "type of api version is not float or integer"
else:
api_ver = 2.9
self.URL = 'https://graph.facebook.com/v{}/'.format(api_ver) + '{}'
self.Access_Token = page_access_token
def send_text(self, user_id, message, notification_type='REGULAR', quick_replies=None):
"""
@required
:param user_id: user_id of the recipient
:type user_id: string
:param message: Message text
:type message: string
:param notification_type: Push notification type: REGULAR, SILENT_PUSH, or NO_PUSH
:type notification_type: string
:param quick_replies: a list containing number of quick replies.(Up to 11)
:return: return response from facebook or type of error if encountered
"""
url = self.URL.format("me/messages")
payload = {}
params = {"access_token": self.Access_Token}
header = {"Content-Type": "application/json"}
payload['recipient'] = {"id": user_id}
payload['message'] = {"text": message}
if quick_replies is not None:
if len(quick_replies) > 11:
raise QuickReplyCountExceeded("The maximum numbers of quick replies allowed are 11")
payload["message"]["quick_replies"] = quick_replies
payload["notification_type"] = notification_type
response = requests.post(url, headers=header, params=params, data=json.dumps(payload))
result = response.json()
if 'recipient_id' not in result:
error = raise_error(result)
raise error
else:
return result
def send_attachment(self, user_id, attachment_type, url=None, file=None, notification_type='REGULAR',
quick_replies=None):
"""
@required
:param user_id: user_id of the recipient
:param attachment_type: Type of attachment, may be image, audio, video, file or template
:type attachment_type: str
:param url: URL of data
:param file: path of attachment in the directory
:type file: str
@optional
:param notification_type: Push notification type: REGULAR, SILENT_PUSH, or NO_PUSH.
:type notification_type: string
:param quick_replies: a list containing number of quick replies.(Up to 11).
:return: response from facebook or type of error if encountered.
"""
payload = {
"recipient": {
"id": user_id
},
"message": {
"attachment": {
"type": attachment_type,
"payload": {}
}
},
}
if url is not None:
payload["message"]["attachment"]["payload"]["url"] = url
elif file is not None:
payload["filedata"] = (os.path.basename(file), open(file, 'rb'))
else:
raise Exception("Please pass argument for 'url' or 'file'")
if quick_replies is not None:
if len(quick_replies) > 11:
raise QuickReplyCountExceeded("The maximum numbers of quick replies allowed are 11")
payload["message"]["quick_replies"] = quick_replies
payload["notification_type"] = notification_type
params = {"access_token": self.Access_Token}
URL = self.URL.format("me/messages")
response = requests.post(URL, headers=headers, params=params, data=json.dumps(payload))
result = response.json()
if "recipient_id" not in result:
error = raise_error(result)
raise error
else:
return result
def sender_action(self, user_id, action="mark_seen"):
"""
Sender Action of Facebook bot API.
For more info https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
:param user_id: User id of the person who is going to receive the action
:type user_id: str
:param action: type of sender action
:type action: str
"""
if not isinstance(action, str):
raise ValueError
payload = {
"recipient": {
"id": user_id
},
"sender_action": action
}
url = self.URL.format("me/messages")
param = {"access_token": self.Access_Token}
response = requests.post(url, headers=headers, params=param, data=json.dumps(payload))
data = response.json()
return data
def get_user_info(self, user_id):
"""
The User Profile API lets your bot get more information about the user
for more info go to https://developers.facebook.com/docs/messenger-platform/user-profile
:param user_id: User id of the person of whom user info is to be retrieved.
:type user_id: str
:return: first name,last name,profile pic,locale,timezone,gender.
"""
url = self.URL.format(user_id)
key = {"fields": "first_name,last_name,profile_pic,locale,timezone,gender",
"access_token": self.Access_Token
}
response = requests.get(url, params=key)
data = response.json()
try:
data = json.decode(data)
except:
pass
try:
return data["first_name"], data["last_name"], data["profile_pic"], data["locale"], data["timezone"], data[
"gender"]
except:
return None
def send_button_template(self, user_id, text, buttons, quick_replies=None):
"""
https://developers.facebook.com/docs/messenger-platform/send-api-reference/button-template
:param user_id: User Id of the recipient to whom the message is being sent.
        :param text: UTF-8 encoded text of up to 640 characters that appears in the main body.
:type text: str
:param buttons: Set of, one to three, buttons that appear as call-to-actions.
:type buttons: Dict | list[dict]
:param quick_replies: a list containing number of quick replies.(Up to 11)
:return:
"""
assert isinstance(text, str), "text argument is not a string"
if len(text) > 640:
raise CharacterCountExceeded(
"The number of characters in the text argument passed are %r. But maximum allowed is up to 640" % len(
text))
if isinstance(buttons, list):
if len(buttons) > 3:
raise ButtonCountExceeded("Max numbers of buttons allowed are 3.number of buttons pass %r ") % len(
buttons)
for index, each_button in enumerate(buttons):
assert isinstance(each_button, dict), "expected dict,passed %s" % type(each_button)
try:
each_button = json.loads(each_button)
except:
pass
finally:
buttons[index] = each_button
elif isinstance(buttons, dict):
buttons = [buttons]
payload = {
"recipient": {
"id": user_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}
}
if quick_replies is not None:
if len(quick_replies) > 11:
raise QuickReplyCountExceeded("The maximum numbers of quick replies allowed are 11")
payload["message"]["quick_replies"] = quick_replies
url = self.URL.format("me/messages") # type: str
params = {"access_token": self.Access_Token} # type: dict
response = requests.post(url, headers=headers, params=params, data=json.dumps(payload))
data = response.json()
if not data.get("recipient_id"):
error = raise_error(data)
raise error
else:
return data
def send_generic_template(self, user_id, elements, quick_replies=None):
"""
For more info go to https://developers.facebook.com/docs/messenger-platform/send-api-reference/generic-template
:param user_id: User Id of the recipient to whom the message is being sent.
:type user_id: str
:param elements: a list of generic templates(up to 10).
:param quick_replies: a list containing number of quick replies.(Up to 11)
Element: Data for each bubble in message
:return:
"""
assert isinstance(elements, list), "Make sure elements is a list of generic templates"
if len(elements) > 10:
raise ElementCountExceeded("The max number of templates allowed are 10.But, %s are given" % len(elements))
payload = {
"recipient": {
"id": user_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"image_aspect_ratio": "horizontal",
"elements": elements
}
}
}
}
if quick_replies is not None:
if len(quick_replies) > 11:
raise QuickReplyCountExceeded("The maximum numbers of quick replies allowed are 11")
payload["message"]["quick_replies"] = quick_replies
url = self.URL.format("me/messages")
params = {"access_token": self.Access_Token}
response = requests.post(url, headers=headers, params=params, data=json.dumps(payload))
data = response.json()
if not data.get("recipient_id"):
error = raise_error(data)
raise error
else:
return data
def send_list_template(self, user_id, elements, top_element_style="large", quick_replies=None):
"""
For more info go to https://developers.facebook.com/docs/messenger-platform/send-api-reference/list-template
:param user_id: User Id of the recipient to whom the message is being sent.
:type user_id: str
:param top_element_style: Value must be large or compact. Default to large if not specified.
:type top_element_style: enum
:param elements: List of view elements (maximum of 4 elements and minimum of 2 elements).
:param quick_replies: a list containing number of quick replies.(Up to 11)
:return:
"""
payload = {
"recipient": {
"id": user_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "list",
"top_element_style": top_element_style,
"elements": elements
}
}
}
}
if quick_replies is not None:
if len(quick_replies) > 11:
raise QuickReplyCountExceeded("The maximum numbers of quick replies allowed are 11")
payload["message"]["quick_replies"] = quick_replies
url = self.URL.format("me/messages")
params = {"access_token": self.Access_Token}
response = requests.post(url, headers=headers, params=params, data=json.dumps(payload))
data = response.json()
if not data.get("recipient_id"):
error = raise_error(data)
raise error
else:
return data
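# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The token and recipient id are placeholders; a real page access token and a PSID
# obtained from a webhook event are required for the request to succeed, and the
# quick-reply dict follows the Messenger quick_replies payload shape.
if __name__ == "__main__":
    bot = Send(page_access_token="<PAGE_ACCESS_TOKEN>")
    result = bot.send_text(
        user_id="<RECIPIENT_PSID>",
        message="Hello from the bot!",
        quick_replies=[{"content_type": "text", "title": "Hi", "payload": "GREETING"}],
    )
    print(result)  # raises one of the exception classes above if Facebook rejects the call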
|
hundredeir/Facebook_PyBot
|
Facebook/send.py
|
Python
|
lgpl-3.0
| 13,018
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv,fields
class folio_report_wizard(osv.osv_memory):
_name = 'folio.report.wizard'
_rec_name = 'date_start'
_columns = {
'date_start':fields.datetime('Start Date'),
'date_end':fields.datetime('End Date')
}
def print_report(self, cr, uid, ids, context=None):
datas = {
'ids': ids,
'model': 'hotel.folio',
'form': self.read(cr, uid, ids)[0]
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'folio.total',
'datas': datas,
}
folio_report_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BorgERP/borg-erp-6of3
|
verticals/hotel61/hotel/wizard/hotel_wizard.py
|
Python
|
agpl-3.0
| 1,725
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('movielists', '0017_auto_20150412_1342'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('comment_text', models.CharField(max_length=200)),
('comment_pub_date', models.DateTimeField(verbose_name=b'date published')),
('movie', models.ForeignKey(to='movielists.Movie')),
],
options={
},
bases=(models.Model,),
),
]
|
kiriakosv/movie-recommendator
|
moviesite/movielists/migrations/0018_comment.py
|
Python
|
mit
| 775
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack.compiler import Compiler, get_compiler_version
class Nag(Compiler):
# Subclasses use possible names of C compiler
cc_names = []
# Subclasses use possible names of C++ compiler
cxx_names = []
# Subclasses use possible names of Fortran 77 compiler
f77_names = ['nagfor']
# Subclasses use possible names of Fortran 90 compiler
fc_names = ['nagfor']
# Named wrapper links within spack.build_env_path
# Use default wrappers for C and C++, in case provided in compilers.yaml
link_paths = {
'cc': 'cc',
'cxx': 'c++',
'f77': 'nag/nagfor',
'fc': 'nag/nagfor'}
@property
def openmp_flag(self):
return "-openmp"
@property
def cxx11_flag(self):
# NAG does not have a C++ compiler
# However, it can be mixed with a compiler that does support it
return "-std=c++11"
@property
def pic_flag(self):
return "-PIC"
# Unlike other compilers, the NAG compiler passes options to GCC, which
# then passes them to the linker. Therefore, we need to doubly wrap the
# options with '-Wl,-Wl,,'
@property
def f77_rpath_arg(self):
return '-Wl,-Wl,,-rpath,,'
@property
def fc_rpath_arg(self):
return '-Wl,-Wl,,-rpath,,'
@classmethod
def default_version(self, comp):
"""The ``-V`` option works for nag compilers.
Output looks like this::
NAG Fortran Compiler Release 6.0(Hibiya) Build 1037
Product NPL6A60NA for x86-64 Linux
"""
return get_compiler_version(
comp, '-V', r'NAG Fortran Compiler Release ([0-9.]+)')
|
EmreAtes/spack
|
lib/spack/spack/compilers/nag.py
|
Python
|
lgpl-2.1
| 2,901
|
from molly.conf.provider import Provider
class BaseGeolocationProvider(Provider):
def reverse_geocode(self, lon, lat):
return []
def geocode(self, query):
return []
from cloudmade import CloudmadeGeolocationProvider
from places import PlacesGeolocationProvider
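# --- Illustrative sketch (editor's addition): a minimal provider subclass ---
# The result structure below is a placeholder, not molly's real geocoding result format;
# see CloudmadeGeolocationProvider for the shape actual providers return.
class EchoGeolocationProvider(BaseGeolocationProvider):
    def geocode(self, query):
        # Placeholder result: echo the query back with a dummy location.
        return [{'name': query, 'location': (0.0, 0.0)}]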
|
mollyproject/mollyproject
|
molly/geolocation/providers/__init__.py
|
Python
|
apache-2.0
| 308
|
from django.db import models
class PeriodCancellation(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
name.help_text = "Recommended. Makes identifying the cancellation easier. E.g. Monday after Easter"
period = models.ForeignKey('Period', related_name='cancellations', on_delete=models.CASCADE)
date = models.DateField(blank=False, null=False)
def __str__(self):
date_string = "{}".format(self.date.strftime('%d.%m.%Y'))
if self.name:
return "{} ({})".format(self.name, date_string)
return date_string
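# Illustrative example (editor's addition): with name="Easter Monday" and date 2024-04-01,
# str(cancellation) renders as "Easter Monday (01.04.2024)"; without a name it would be
# just "01.04.2024".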
|
gitsimon/tq_website
|
courses/models/period_cancellation.py
|
Python
|
gpl-2.0
| 590
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import posixpath, os, urllib, re
from urlparse import urlparse
from threading import Thread
from Queue import Queue, Empty
from PyQt5.Qt import QPixmap, Qt, QDialog, QLabel, QVBoxLayout, \
QDialogButtonBox, QProgressBar, QTimer, QUrl
from calibre.constants import DEBUG, iswindows
from calibre.ptempfile import PersistentTemporaryFile
from calibre import browser, as_unicode, prints
from calibre.gui2 import error_dialog
from calibre.utils.imghdr import what
IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'gif', 'png', 'bmp']
class Worker(Thread): # {{{
def __init__(self, url, fpath, rq):
Thread.__init__(self)
self.url, self.fpath = url, fpath
self.daemon = True
self.rq = rq
self.err = self.tb = None
def run(self):
try:
br = browser()
br.retrieve(self.url, self.fpath, self.callback)
except Exception as e:
self.err = as_unicode(e)
import traceback
self.tb = traceback.format_exc()
def callback(self, a, b, c):
self.rq.put((a, b, c))
# }}}
class DownloadDialog(QDialog): # {{{
def __init__(self, url, fname, parent):
QDialog.__init__(self, parent)
self.setWindowTitle(_('Download %s')%fname)
self.l = QVBoxLayout(self)
self.purl = urlparse(url)
self.msg = QLabel(_('Downloading <b>%(fname)s</b> from %(url)s')%dict(
fname=fname, url=self.purl.netloc))
self.msg.setWordWrap(True)
self.l.addWidget(self.msg)
self.pb = QProgressBar(self)
self.pb.setMinimum(0)
self.pb.setMaximum(0)
self.l.addWidget(self.pb)
self.bb = QDialogButtonBox(QDialogButtonBox.Cancel, Qt.Horizontal, self)
self.l.addWidget(self.bb)
self.bb.rejected.connect(self.reject)
sz = self.sizeHint()
self.resize(max(sz.width(), 400), sz.height())
fpath = PersistentTemporaryFile(os.path.splitext(fname)[1])
fpath.close()
self.fpath = fpath.name
self.worker = Worker(url, self.fpath, Queue())
self.rejected = False
def reject(self):
self.rejected = True
QDialog.reject(self)
def start_download(self):
self.worker.start()
QTimer.singleShot(50, self.update)
self.exec_()
if self.worker.err is not None:
error_dialog(self.parent(), _('Download failed'),
_('Failed to download from %(url)r with error: %(err)s')%dict(
url=self.worker.url, err=self.worker.err),
det_msg=self.worker.tb, show=True)
def update(self):
if self.rejected:
return
try:
progress = self.worker.rq.get_nowait()
except Empty:
pass
else:
self.update_pb(progress)
if not self.worker.is_alive():
return self.accept()
QTimer.singleShot(50, self.update)
def update_pb(self, progress):
transferred, block_size, total = progress
if total == -1:
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.pb.setValue(0)
else:
so_far = transferred * block_size
self.pb.setMaximum(max(total, so_far))
self.pb.setValue(so_far)
@property
def err(self):
return self.worker.err
# }}}
def dnd_has_image(md):
# Chromium puts image data into application/octet-stream
return md.hasImage() or md.hasFormat('application/octet-stream') and what(None, bytes(md.data('application/octet-stream'))) in IMAGE_EXTENSIONS
def data_as_string(f, md):
raw = bytes(md.data(f))
if '/x-moz' in f:
try:
raw = raw.decode('utf-16')
except:
pass
return raw
def urls_from_md(md):
ans = list(md.urls())
if md.hasText():
# Chromium returns the url as text/plain on drag and drop of image
text = md.text()
if text and text.lstrip().partition(':')[0] in {'http', 'https', 'ftp'}:
u = QUrl(text.strip())
if u.isValid():
ans.append(u)
return ans
def path_from_qurl(qurl):
raw = bytes(qurl.toEncoded(
QUrl.PreferLocalFile | QUrl.RemoveScheme | QUrl.RemovePassword | QUrl.RemoveUserInfo |
QUrl.RemovePort | QUrl.RemoveAuthority | QUrl.RemoveQuery | QUrl.RemoveFragment))
ans = urllib.unquote(raw).decode('utf-8', 'replace')
if iswindows and ans.startswith('/'):
ans = ans[1:]
return ans
def remote_urls_from_qurl(qurls, allowed_exts):
for qurl in qurls:
if qurl.scheme() in {'http', 'https', 'ftp'} and posixpath.splitext(
qurl.path())[1][1:].lower() in allowed_exts:
yield bytes(qurl.toEncoded()), posixpath.basename(qurl.path())
def dnd_has_extension(md, extensions):
if DEBUG:
prints('\nDebugging DND event')
for f in md.formats():
f = unicode(f)
raw = data_as_string(f, md)
prints(f, len(raw), repr(raw[:300]), '\n')
print ()
if has_firefox_ext(md, extensions):
return True
urls = urls_from_md(md)
paths = [path_from_qurl(u) for u in urls]
exts = frozenset([posixpath.splitext(u)[1][1:].lower() for u in paths if u])
if DEBUG:
repr_urls = [bytes(u.toEncoded()) for u in urls]
prints('URLS:', repr(repr_urls))
prints('Paths:', paths)
prints('Extensions:', exts)
return bool(exts.intersection(frozenset(extensions)))
def dnd_get_image(md, image_exts=IMAGE_EXTENSIONS):
'''
Get the image in the QMimeData object md.
:return: None, None if no image is found
QPixmap, None if an image is found, the pixmap is guaranteed not
null
url, filename if a URL that points to an image is found
'''
if md.hasImage():
for x in md.formats():
x = unicode(x)
if x.startswith('image/'):
cdata = bytes(md.data(x))
pmap = QPixmap()
pmap.loadFromData(cdata)
if not pmap.isNull():
return pmap, None
break
if md.hasFormat('application/octet-stream'):
cdata = bytes(md.data('application/octet-stream'))
pmap = QPixmap()
pmap.loadFromData(cdata)
if not pmap.isNull():
return pmap, None
# No image, look for an URL pointing to an image
urls = urls_from_md(md)
paths = [path_from_qurl(u) for u in urls]
# First look for a local file
images = [xi for xi in paths if
posixpath.splitext(urllib.unquote(xi))[1][1:].lower() in
image_exts]
images = [xi for xi in images if os.path.exists(xi)]
p = QPixmap()
for path in images:
try:
with open(path, 'rb') as f:
p.loadFromData(f.read())
except Exception:
continue
if not p.isNull():
return p, None
# No local images, look for remote ones
# First, see if this is from Firefox
rurl, fname = get_firefox_rurl(md, image_exts)
if rurl and fname:
return rurl, fname
# Look through all remaining URLs
for remote_url, filename in remote_urls_from_qurl(urls, image_exts):
return remote_url, filename
return None, None
def dnd_get_files(md, exts):
'''
Get the file in the QMimeData object md with an extension that is one of
the extensions in exts.
:return: None, None if no file is found
[paths], None if a local file is found
             [urls], [filenames] if URLs that point to files are found
'''
# Look for a URL pointing to a file
urls = urls_from_md(md)
# First look for a local file
local_files = [path_from_qurl(x) for x in urls]
local_files = [p for p in local_files if
posixpath.splitext(urllib.unquote(p))[1][1:].lower() in
exts]
local_files = [x for x in local_files if os.path.exists(x)]
if local_files:
return local_files, None
# No local files, look for remote ones
# First, see if this is from Firefox
rurl, fname = get_firefox_rurl(md, exts)
if rurl and fname:
return [rurl], [fname]
# Look through all remaining URLs
rurls, filenames = [], []
for rurl, fname in remote_urls_from_qurl(urls, exts):
rurls.append(rurl), filenames.append(fname)
if rurls:
return rurls, filenames
return None, None
def _get_firefox_pair(md, exts, url, fname):
url = bytes(md.data(url)).decode('utf-16')
fname = bytes(md.data(fname)).decode('utf-16')
while url.endswith('\x00'):
url = url[:-1]
while fname.endswith('\x00'):
fname = fname[:-1]
if not url or not fname:
return None, None
ext = posixpath.splitext(fname)[1][1:].lower()
# Weird firefox bug on linux
ext = {'jpe':'jpg', 'epu':'epub', 'mob':'mobi'}.get(ext, ext)
fname = os.path.splitext(fname)[0] + '.' + ext
if DEBUG:
prints('Firefox file promise:', url, fname)
if ext not in exts:
fname = url = None
return url, fname
def get_firefox_rurl(md, exts):
formats = frozenset([unicode(x) for x in md.formats()])
url = fname = None
if 'application/x-moz-file-promise-url' in formats and \
'application/x-moz-file-promise-dest-filename' in formats:
try:
url, fname = _get_firefox_pair(md, exts,
'application/x-moz-file-promise-url',
'application/x-moz-file-promise-dest-filename')
except:
if DEBUG:
import traceback
traceback.print_exc()
if url is None and 'text/x-moz-url-data' in formats and \
'text/x-moz-url-desc' in formats:
try:
url, fname = _get_firefox_pair(md, exts,
'text/x-moz-url-data', 'text/x-moz-url-desc')
except:
if DEBUG:
import traceback
traceback.print_exc()
if url is None and '_NETSCAPE_URL' in formats:
try:
raw = bytes(md.data('_NETSCAPE_URL'))
raw = raw.decode('utf-8')
lines = raw.splitlines()
if len(lines) > 1 and re.match(r'[a-z]+://', lines[1]) is None:
url, fname = lines[:2]
ext = posixpath.splitext(fname)[1][1:].lower()
if ext not in exts:
fname = url = None
except:
if DEBUG:
import traceback
traceback.print_exc()
if DEBUG:
prints('Firefox rurl:', url, fname)
return url, fname
def has_firefox_ext(md, exts):
return bool(get_firefox_rurl(md, exts)[0])
|
sharad/calibre
|
src/calibre/gui2/dnd.py
|
Python
|
gpl-3.0
| 11,090
|
#!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
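# The resulting table maps a macro and an operator to its replacement, e.g.
#   _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'  (comparison inverted)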
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
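# For example, in "if (x and y)" the " and" matches (suggesting "&&"), while the
# "and" inside an identifier such as "android" does not.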
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
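# For example, 'asm {' and '__asm__ __volatile__ ("nop")' both match.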
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['c', 'cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
  include_list contains a list of lists of (header, line number) pairs.
  It's a list of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # base_trigger => 0, 2*base_trigger => 1, 4*base_trigger => 2, ... (capped at 5 below)
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
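# For example, this matches the escape sequences in "\n", "\"", "\012" and "\x1b".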
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# also have to strip the whitespace around a comment so that comments embedded
# inside statements are handled better.
# The current rule is: we only strip spaces from both sides when the comment is
# at the end of the line. Otherwise, we try to remove spaces from the right
# side; if that doesn't work, we remove them from the left side, but only when
# a non-word character follows the comment.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
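# For example, 'int x = 1;  /* counter */' is cleansed to 'int x = 1;'.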
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '>', the matching '<' must have been an operator,
      # since a template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differ by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '<', the matching '>' must have been an operator,
      # since a template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
for i in lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
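# For example, '*count++;' matches, while '(*count)++;' and '++*count;' do not.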
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self):
_BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if they aren't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo())
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
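# A rough sketch of how a NestingState instance is typically driven, line by
# line, over a file (illustrative only; the per-file loop elsewhere in this
# file is the authoritative caller):
#   nesting_state = NestingState()
#   for linenum in xrange(clean_lines.NumLines()):
#     nesting_state.Update(filename, clean_lines, linenum, error)
#   nesting_state.CheckCompletedBlocks(filename, error)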
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.')
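# Hypothetical constructor declarations and how the explicit-constructor
# check above would likely treat them:
#   Foo(int x);                  -> flagged: single-parameter, not explicit
#   Foo(int x, int y = 0);       -> flagged: callable with one argument
#   explicit Foo(int x);         -> accepted
#   Foo(const Foo& other);       -> accepted: copy constructors are exempt
#   explicit Foo(int x, int y);  -> flagged (low severity): multi-argument
#                                   constructors need not be explicit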
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
# TODO(unknown): Space after an operator function seems to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
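# Made-up calls illustrating what the spacing checks above would flag:
#   Foo( bar);       -> "Extra space after ( in function call"
#   Foo (bar);       -> "Extra space before ( in function call"
#   Foo(bar );       -> "Extra space before )"
#   if (Foo(bar)) {  -> left alone: control structures follow their own rules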
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
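# Hypothetical TODO comments and how CheckComment below would classify them:
#   // TODO(alice): refactor this     -> well formed
#   // TODO: refactor this            -> missing username
#   //   TODO(alice): refactor this   -> too many spaces before TODO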
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the //.
if Match(r'//[^ ]*\w', comment):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: if the line is indented 4 spaces and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists), we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
if (match and match.group(1) != '(' and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
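# Made-up lines and the likely verdict of the operator checks above:
#   if (a==b) return;   -> "Missing spaces around =="
#   cout<<"hi";         -> "Missing spaces around <<"
#   value >>alpha;      -> "Missing spaces around >>"
#   Foo<Bar<int>> x;    -> accepted: ">>" closing nested templates is fine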
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
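# Made-up control statements and the likely result of the checks above
# (leading indentation assumed, as in normal code):
#   if(foo) {           -> "Missing space before ( in if("
#   if ( foo) {         -> "Mismatching spaces inside () in if"
#   if (  foo  ) {      -> "Should have zero or one spaces inside ( and ) in if"
#   for ( ; foo; bar) { -> accepted as a special case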
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
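# Made-up lines for the comma/semicolon checks above:
#   Foo(a,b);               -> "Missing space after ,"
#   for (i = 0;i < n; ++i)  -> "Missing space after ;"
#   for (;;) {}             -> accepted: empty for-clauses are exempt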
def CheckBracesSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
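# Made-up lines for the brace and empty-statement checks above:
#   int Foo(){             -> likely "Missing space before {"
#   }else {                -> "Missing space before else"
#   while (cond) ;         -> "Extra space before last semicolon ..."
#   std::vector<int> v{};  -> accepted: brace initialization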
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def IsTemplateParameterList(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is the end of template<>.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is end of a template parameter list, False otherwise.
"""
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, column)
if (startpos > -1 and
Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
return True
return False
def IsRValueType(clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
# most likely an rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for simple type and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']:
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
# Found closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
# first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
before_text = match_symbol.group(1)
if linenum > 1:
before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
def IsRValueAllowed(clean_lines, linenum):
"""Check if RValue reference is allowed on a particular line.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if line is within the region where RValue references are allowed.
"""
# Allow region marked by PUSH/POP macros
for i in xrange(linenum, 0, -1):
line = clean_lines.elided[i]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
if not line.endswith('PUSH'):
return False
for j in xrange(linenum, clean_lines.NumLines(), 1):
line = clean_lines.elided[j]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
return line.endswith('POP')
# Allow operator=
line = clean_lines.elided[linenum]
if Search(r'\boperator\s*=\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
# Allow constructors
match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
if match and match.group(1) == match.group(2):
return IsDeletedOrDefault(clean_lines, linenum)
if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
if Match(r'\s*[\w<>]+\s*\(', line):
previous_line = 'ReturnType'
if linenum > 0:
previous_line = clean_lines.elided[linenum - 1]
if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
return IsDeletedOrDefault(clean_lines, linenum)
return False
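# A hypothetical region where the rvalue-reference warning below would be
# suppressed, using the PUSH/POP allowance handled above:
#   GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH
#   void Take(Foo&& foo);
#   GOOGLE_ALLOW_RVALUE_REFERENCES_POP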
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
and_pos = len(match.group(1))
if IsRValueType(clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
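# Illustrative sketch (assumed examples matching the cases above):
#   for (;;) {};     // case 1: warn, the ';' after '}' is redundant
#   namespace {};    // case 7: currently not warned about
#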
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals and lambdas.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
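# Illustrative sketch (hypothetical identifiers) of what gets flagged:
#   while (Poll());   // empty loop body: suggest '{}' or 'continue'
#   if (ready);       // empty conditional body: suggest '{}'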
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
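# Illustrative sketch (assumed examples) of the intended suggestions:
#   CHECK(x == 42)         -> suggest CHECK_EQ(x, 42)
#   CHECK((a < 4) == b)    -> no suggestion (parenthesized operand)
#   CHECK(42 < a && a < b) -> no suggestion (more than one term)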
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
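# Rough usage sketch (assumed, for illustration only): plain ASCII text has
# width == len(line), while wide CJK characters count as two columns each,
# e.g. GetLineWidth(u'\u4e2d\u6587') == 4.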
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations in which we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
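# Illustrative sketch (assumed examples; the URL is a placeholder) of lines
# that are exempt from the length limit under the rules above:
#   #include "very/long/path/to/some/header/that/cannot/be/split.h"
#   // See https://example.com/some/very/long/reference/url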
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
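# Illustrative sketch (assumed example) of the preferred order for foo.cc:
#   #include "foo/foo.h"      // 1) the corresponding header
#   #include <stdio.h>        // 2) C system headers
#   #include <string>         // 3) C++ system headers
#   #include "bar/other.h"    // 5) other project headers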
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
# Suggest a different header for ostream
if include == 'ostream':
error(filename, linenum, 'readability/streams', 3,
'For logging, include "base/logging.h" instead of <ostream>.')
else:
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text from. Its comments and strings must be elided.
It can be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Map each opening punctuation character to its matching closing character.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
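# Illustrative sketch (hypothetical signatures) of how these patterns are
# used by the non-const reference check further below:
#   void Print(const string& s);  // matches the const-ref pattern: not flagged
#   void Fill(string& out);       // plain non-const reference: flagged
#   void Swap(string& a, string& b);  // non-const, but swap() is whitelisted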
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
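# Illustrative sketch (hypothetical declarations) of the intent:
#   char buf[n];          // 'n' is not a compile-time constant: flagged
#   char buf[kMaxSize];   // 'k' + CamelCase constant: accepted
#   char buf[sizeof(T)];  // sizeof(...) expression: accepted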
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
# then it should be the last thing in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
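# Illustrative sketch (hypothetical call): for
#   snprintf(buf, 10, "%s", name);
# the check above suggests sizeof(buf) in place of the literal 10.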
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
# A lone colon tends to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
# also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may be
# found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)]+)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
# Try expanding current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like an old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly require all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# Function((function_pointer_arg)(int), int param)
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
raw_line = clean_lines.raw_lines[linenum]
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
    filename_h: is the path for the header file
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
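# Hedged, illustrative examples of the module matching implemented above (the
# paths are made up; the expected tuples follow directly from the code):
#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h')           -> (True, '')
#   FilesBelongToSameModule('a/b/bar.cc', 'a/b/foo.h')                -> (False, '')
#   FilesBelongToSameModule('/abs/base/sysinfo.cc', 'base/sysinfo.h') -> (True, '/abs/')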
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
"""Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# A lambda introducer specifies a default capture if it starts with "[="
# or if it starts with "[&" _not_ followed by an identifier.
match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
if match:
# Found a potential error, check what comes after the lambda-introducer.
# If it's not open parenthesis (for lambda-declarator) or open brace
# (for compound-statement), it's not a lambda.
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
error(filename, linenum, 'build/c++11',
4, # 4 = high confidence
'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*\bvirtual\b)', line)
if not virtual: return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(1))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Check that at most one of "override" or "final" is present, not both
line = clean_lines.elided[linenum]
if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
    clean_lines: A CleansedLines instance of the file, with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Flag unapproved C++11 headers.
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
# utility
'forward',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
sys.stderr.write('Line length must be numeric.')
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for filter in reversed(cfg_filters):
_AddFilters(filter)
return True
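# Hedged example of a CPPLINT.cfg understood by ProcessConfigOverrides above
# (the values are made up; the option names come from the parser):
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*_generated\.cc
#   linelength=100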
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority, we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
sys.stderr.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
          PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
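# Hedged, illustrative invocation (flags taken from ParseArguments above; the
# file names are made up):
#   python cpplint.py --output=vs7 --counting=detailed --filter=-whitespace \
#       --linelength=100 --extensions=cc,h foo.cc bar.h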
| weizhenwei/leetcode | tools/cpplint.py | Python | bsd-3-clause | 235,110 |
from django.db import models
from muddery.worlddata import model_base
# ------------------------------------------------------------
#
# game's basic settings
#
# ------------------------------------------------------------
class game_settings(model_base.game_settings):
"""
Game's basic settings.
"""
pass
# ------------------------------------------------------------
#
# all class's categories
#
# ------------------------------------------------------------
class class_categories(model_base.class_categories):
"all class's categories"
pass
# ------------------------------------------------------------
#
# store all typeclasses
#
# ------------------------------------------------------------
class typeclasses(model_base.typeclasses):
"store all typeclasses"
pass
# ------------------------------------------------------------
#
# world areas
#
# ------------------------------------------------------------
class world_areas(model_base.world_areas):
"Rooms belongs to areas."
pass
#------------------------------------------------------------
#
# store all rooms
#
#------------------------------------------------------------
class world_rooms(model_base.world_rooms):
"Store all unique rooms."
pass
#------------------------------------------------------------
#
# store all exits
#
#------------------------------------------------------------
class world_exits(model_base.world_exits):
"Store all unique exits."
pass
#------------------------------------------------------------
#
# store exit locks
#
#------------------------------------------------------------
class exit_locks(model_base.exit_locks):
"Store all exit locks."
pass
#------------------------------------------------------------
#
# two way exit's additional data
#
#------------------------------------------------------------
class two_way_exits(model_base.two_way_exits):
"Store all exit locks."
pass
#------------------------------------------------------------
#
# store all objects
#
#------------------------------------------------------------
class world_objects(model_base.world_objects):
"Store all unique objects."
pass
#------------------------------------------------------------
#
# store all object creators
#
#------------------------------------------------------------
class object_creators(model_base.object_creators):
"Store all object creators."
pass
#------------------------------------------------------------
#
# object creator's loot list
#
#------------------------------------------------------------
class creator_loot_list(model_base.creator_loot_list):
"Object creator's loot list"
pass
#------------------------------------------------------------
#
# store all common objects
#
#------------------------------------------------------------
class common_objects(model_base.common_objects):
"Store all common objects."
pass
# ------------------------------------------------------------
#
# store all foods
#
# ------------------------------------------------------------
class foods(model_base.foods):
"Foods inherit from common objects."
pass
# ------------------------------------------------------------
#
# store all skill books
#
# ------------------------------------------------------------
class skill_books(model_base.skill_books):
"Skill books inherit from common objects."
pass
#------------------------------------------------------------
#
# store all equip_types
#
#------------------------------------------------------------
class equipment_types(model_base.equipment_types):
"Store all equip types."
pass
#------------------------------------------------------------
#
# store all equip_positions
#
#------------------------------------------------------------
class equipment_positions(model_base.equipment_positions):
"Store all equip types."
pass
#------------------------------------------------------------
#
# store all equipments
#
#------------------------------------------------------------
class equipments(model_base.equipments):
"Store all equipments."
# ------------------------------------------------------------
#
# store all careers
#
# ------------------------------------------------------------
class character_careers(model_base.character_careers):
"Store all careers."
pass
# ------------------------------------------------------------
#
# store career and equipment type's relationship
#
# ------------------------------------------------------------
class career_equipments(model_base.career_equipments):
"Store career and equipment type's relationship."
pass
# ------------------------------------------------------------
#
# character attributes
#
# ------------------------------------------------------------
class character_attributes_info(model_base.character_attributes_info):
"character attributes"
pass
# ------------------------------------------------------------
#
# Equipment attribute's information.
#
# ------------------------------------------------------------
class equipment_attributes_info(model_base.equipment_attributes_info):
"Equipment's all available attributes"
pass
# ------------------------------------------------------------
#
# Food attribute's information.
#
# ------------------------------------------------------------
class food_attributes_info(model_base.food_attributes_info):
"Food attribute's information."
pass
#------------------------------------------------------------
#
# character levels
#
#------------------------------------------------------------
class character_models(model_base.character_models):
"Store all character level informations."
pass
#------------------------------------------------------------
#
# store all npcs
#
#------------------------------------------------------------
class world_npcs(model_base.world_npcs):
"Store all unique objects."
pass
#------------------------------------------------------------
#
# store common characters
#
#------------------------------------------------------------
class common_characters(model_base.common_characters):
"Store all common characters."
pass
#------------------------------------------------------------
#
# character's loot list
#
#------------------------------------------------------------
class character_loot_list(model_base.character_loot_list):
"Character's loot list"
pass
#------------------------------------------------------------
#
# character's default objects
#
#------------------------------------------------------------
class default_objects(model_base.default_objects):
"Store character's default objects information."
pass
# ------------------------------------------------------------
#
# shops
#
# ------------------------------------------------------------
class shops(model_base.shops):
"Store all shops."
pass
# ------------------------------------------------------------
#
# shop goods
#
# ------------------------------------------------------------
class shop_goods(model_base.shop_goods):
"All goods that sold in shops."
pass
# ------------------------------------------------------------
#
# npc shops
#
# ------------------------------------------------------------
class npc_shops(model_base.npc_shops):
"Store npc's shops."
pass
#------------------------------------------------------------
#
# store all skills
#
#------------------------------------------------------------
class skills(model_base.skills):
"Store all skills."
pass
#------------------------------------------------------------
#
# character skills
#
#------------------------------------------------------------
class default_skills(model_base.default_skills):
"Store all character skill informations."
pass
#------------------------------------------------------------
#
# store all quests
#
#------------------------------------------------------------
class quests(model_base.quests):
"Store all dramas."
pass
#------------------------------------------------------------
#
# quest's reward list
#
#------------------------------------------------------------
class quest_reward_list(model_base.quest_reward_list):
"Quest's reward list"
pass
# ------------------------------------------------------------
#
# quest objective's type
#
# ------------------------------------------------------------
class quest_objective_types(model_base.quest_objective_types):
"quest objective's type"
pass
#------------------------------------------------------------
#
# store quest objectives
#
#------------------------------------------------------------
class quest_objectives(model_base.quest_objectives):
"Store all quest objectives."
pass
#------------------------------------------------------------
#
# store quest dependency types
#
#------------------------------------------------------------
class quest_dependency_types(model_base.quest_dependency_types):
"Store quest dependency."
pass
#------------------------------------------------------------
#
# store quest dependencies
#
#------------------------------------------------------------
class quest_dependencies(model_base.quest_dependencies):
"Store quest dependency."
pass
# ------------------------------------------------------------
#
# event's type
#
# ------------------------------------------------------------
class event_types(model_base.event_types):
"Event's type"
pass
# ------------------------------------------------------------
#
# event triggers
#
# ------------------------------------------------------------
class event_trigger_types(model_base.event_trigger_types):
"Event's trigger types"
pass
#------------------------------------------------------------
#
# store event data
#
#------------------------------------------------------------
class event_data(model_base.event_data):
"Store event data."
pass
#------------------------------------------------------------
#
# store all dialogues
#
#------------------------------------------------------------
class dialogues(model_base.dialogues):
"Store all dialogues."
pass
#------------------------------------------------------------
#
# store dialogue quest dependencies
#
#------------------------------------------------------------
class dialogue_quest_dependencies(model_base.dialogue_quest_dependencies):
"Store dialogue quest dependencies."
pass
#------------------------------------------------------------
#
# store dialogue relations
#
#------------------------------------------------------------
class dialogue_relations(model_base.dialogue_relations):
"Store dialogue relations."
pass
#------------------------------------------------------------
#
# store dialogue sentences
#
#------------------------------------------------------------
class dialogue_sentences(model_base.dialogue_sentences):
"Store dialogue sentences."
pass
#------------------------------------------------------------
#
# store npc's dialogue
#
#------------------------------------------------------------
class npc_dialogues(model_base.npc_dialogues):
"Store all dialogues."
pass
# ------------------------------------------------------------
#
# event attack's data
#
# ------------------------------------------------------------
class event_attacks(model_base.event_attacks):
"event attack's data"
pass
#------------------------------------------------------------
#
# event dialogues
#
#------------------------------------------------------------
class event_dialogues(model_base.event_dialogues):
"Store all event dialogues."
pass
#------------------------------------------------------------
#
# localized strings
#
#------------------------------------------------------------
class localized_strings(model_base.localized_strings):
"Store all system localized strings."
pass
#------------------------------------------------------------
#
# image resources
#
#------------------------------------------------------------
class image_resources(model_base.image_resources):
"Store all image resource's information."
pass
#------------------------------------------------------------
#
# icon resources
#
#------------------------------------------------------------
class icon_resources(model_base.icon_resources):
"Store all icon resource's information."
pass
| MarsZone/DreamLand | muddery/game_template/worlddata/models.py | Python | bsd-3-clause | 12,624 |
# postgresql/pypostgresql.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database via py-postgresql.
Connecting
----------
URLs are of the form ``postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]``.
"""
from sqlalchemy import util
from sqlalchemy import types as sqltypes
from sqlalchemy.dialects.postgresql.base import PGDialect, PGExecutionContext
from sqlalchemy import processors
class PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
if self.asdecimal:
return None
else:
return processors.to_float
class PGExecutionContext_pypostgresql(PGExecutionContext):
pass
class PGDialect_pypostgresql(PGDialect):
driver = 'pypostgresql'
supports_unicode_statements = True
supports_unicode_binds = True
description_encoding = None
default_paramstyle = 'pyformat'
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropriately
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_pypostgresql
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric : PGNumeric,
sqltypes.Float: sqltypes.Float, # prevents PGNumeric from being used
}
)
@classmethod
def dbapi(cls):
from postgresql.driver import dbapi20
return dbapi20
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
else:
opts['port'] = 5432
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
dialect = PGDialect_pypostgresql
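# Hedged usage sketch (standard SQLAlchemy API; user, password, host and
# database name are placeholders, and the URL form follows the docstring):
#   from sqlalchemy import create_engine
#   engine = create_engine(
#       "postgresql+pypostgresql://scott:tiger@localhost:5432/mydb")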
| coolbombom/CouchPotatoServer | libs/sqlalchemy/dialects/postgresql/pypostgresql.py | Python | gpl-3.0 | 2,155 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PurgeParameters(Model):
"""Parameters required for content purge.
:param content_paths: The path to the content to be purged. Can describe a
file path or a wild card directory.
:type content_paths: list[str]
"""
_validation = {
'content_paths': {'required': True},
}
_attribute_map = {
'content_paths': {'key': 'contentPaths', 'type': '[str]'},
}
def __init__(self, content_paths):
super(PurgeParameters, self).__init__()
self.content_paths = content_paths
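# Hedged usage sketch (the paths are made-up examples of the single-file and
# wildcard-directory forms described in the docstring):
#   params = PurgeParameters(content_paths=['/pictures/city.png', '/scripts/*'])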
| lmazuel/azure-sdk-for-python | azure-mgmt-cdn/azure/mgmt/cdn/models/purge_parameters.py | Python | mit | 1,055 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('browser', '0037_auto_20170222_1847'),
]
operations = [
#migrations.AddField(
# model_name='compare',
# name='share',
# field=models.BooleanField(default=False),
#),
migrations.AlterField(
model_name='overlap',
name='name',
field=models.CharField(db_index=True, max_length=500, blank=True),
),
migrations.AlterField(
model_name='overlap',
name='name1',
field=models.CharField(db_index=True, max_length=200, blank=True),
),
migrations.AlterField(
model_name='overlap',
name='name2',
field=models.CharField(db_index=True, max_length=50, blank=True),
),
migrations.AlterField(
model_name='overlap',
name='name3',
field=models.CharField(db_index=True, max_length=200, blank=True),
),
migrations.AlterField(
model_name='overlap',
name='name4',
field=models.CharField(db_index=True, max_length=50, blank=True),
),
migrations.AlterField(
model_name='overlap',
name='name5',
field=models.CharField(db_index=True, max_length=200, blank=True),
),
]
| MRCIEU/melodi | browser/migrations/0038_auto_20170222_1851.py | Python | mit | 1,491 |
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe':{'bundle_files':1, 'includes':["sip"]}},
windows = [{'script': "AlarmSetup.py"}],
license="MIT",
package_data={"AlarmClock": ["Resources/bell.png"]},
zipfile = None,
)
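# Hedged usage note: because 'py2exe' is appended to sys.argv above, simply
# running
#   python setup_win.py
# builds AlarmSetup.py into a single-file Windows executable (bundle_files=1
# plus zipfile=None).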
| amjith/PyAlarmTimer | setup_win.py | Python | mit | 317 |
from django.apps import AppConfig
class HealthApp(AppConfig):
name = "normandy.health"
label = "health"
verbose_name = "Normandy Health"
def ready(self):
# Import for side-effect: registers signal handler
import normandy.health.signals # NOQA
| mozilla/normandy | normandy/health/apps.py | Python | mpl-2.0 | 279 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from data_setup import run_rpmdeplint
def test_prints_usage_when_no_subcommand_is_given():
exitcode, out, err = run_rpmdeplint(['rpmdeplint'])
assert 'usage:' in err
# The first wording is on Python < 3.3, the second wording is on Python 3.3+
assert ('error: too few arguments' in err or
'error: the following arguments are required: subcommand' in err)
assert exitcode == 2
# https://bugzilla.redhat.com/show_bug.cgi?id=1537961
def test_prints_usage_when_no_repos_are_defined():
exitcode, out, err = run_rpmdeplint(['rpmdeplint', 'check', 'some.rpm'])
assert 'usage:' in err
assert 'error: no repos specified to test against' in err
assert exitcode == 2
| default-to-open/rpmdeplint | acceptance_tests/test_usage.py | Python | gpl-2.0 | 958 |
from __future__ import unicode_literals
from typing import NoReturn, Text, Any, List
import logging
import collections
import uuid
from vstutils.utils import raise_context, ModelHandlers
from .base import BModel, BQuerySet, models
logger = logging.getLogger('polemarch')
class HookHandlers(ModelHandlers):
when_types_names = collections.OrderedDict((
        ('on_execution', "Before a task starts"),
        ('after_execution', "After a task ends"),
        ('on_user_add', "When a new user registers"),
        ('on_user_upd', "When a user updates data"),
        ('on_user_del', "When a user is removed"),
        ('on_object_add', "When a new Polemarch object is added"),
        ('on_object_upd', "When a Polemarch object is updated"),
        ('on_object_del', "When a Polemarch object is removed"),
))
when_types = tuple(when_types_names.keys())
def get_handler(self, obj: BModel):
return self[obj.type](obj, self.when_types, **self.opts(obj.type))
@raise_context(AttributeError, exclude=True)
def handle(self, obj: BModel, when: Text, message: Any):
logger.debug("Send hook {} triggered by {}.".format(obj.name, when))
return getattr(self.get_handler(obj), when)(message)
def validate(self, obj: BModel):
return self.get_handler(obj).validate()
class HooksQuerySet(BQuerySet):
use_for_related_fields = True
def when(self, when: Text) -> BQuerySet:
return self.filter(enable=True).filter(models.Q(when=when) | models.Q(when=None))
def execute(self, when: Text, message: Any) -> NoReturn:
for hook in self.when(when):
with raise_context():
hook.run(when, message)
class Hook(BModel):
# pylint: disable=no-member
objects = HooksQuerySet.as_manager()
handlers = HookHandlers("HOOKS", "'type' needed!")
name = models.CharField(max_length=512, default=uuid.uuid1)
type = models.CharField(max_length=32, null=False, db_index=True)
when = models.CharField(max_length=32, null=True, default=None, db_index=True)
enable = models.BooleanField(default=True, db_index=True)
recipients = models.TextField()
@property
def reps(self) -> List[Text]:
return self.recipients.split(' | ')
def run(self, when: Text = 'on_execution', message: Any = None):
return (
self.handlers.handle(self, when, message)
if self.when is None or self.when == when else ''
)
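# Hedged usage sketch built only from the models above (the trigger name comes
# from HookHandlers.when_types; the message payload is made up):
#   Hook.objects.all().when('on_user_add')                       # enabled hooks
#   Hook.objects.all().execute('on_user_add', {'username': 'demo'})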
| vstconsulting/polemarch | polemarch/main/models/hooks.py | Python | agpl-3.0 | 2,474 |
# -*- coding: utf-8 -*-
#
# 2016-04-08 Cornelius Kölbel <cornelius@privacyidea.org>
# Avoid consecutive if-statements
# 2015-02-25 Cornelius Kölbel <cornelius@privacyidea.org>
#             Initial writeup
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
__doc__ = """This contains the HostsMachineResolver which simply resolves
the machines in a file like /etc/hosts.
The machine id is the IP address in this case.
This file is tested in tests/test_lib_machines.py in the class
HostsMachineTestCase
"""
from .base import Machine
from .base import BaseMachineResolver
from .base import MachineResolverError
import netaddr
class HostsMachineResolver(BaseMachineResolver):
type = "hosts"
def get_machines(self, machine_id=None, hostname=None, ip=None, any=None,
substring=False):
"""
Return matching machines.
:param machine_id: can be matched as substring
:param hostname: can be matched as substring
:param ip: can not be matched as substring
:param substring: Whether the filtering should be a substring matching
:type substring: bool
:param any: a substring that matches EITHER hostname, machineid or ip
:type any: basestring
:return: list of Machine Objects
"""
machines = []
f = open(self.filename, "r")
try:
for line in f:
split_line = line.split()
if len(split_line) < 2:
# skip lines with less than 2 columns
continue
if split_line[0][0] == "#":
# skip comments
continue
line_id = split_line[0]
line_ip = netaddr.IPAddress(split_line[0])
line_hostname = split_line[1:]
# check if machine_id, ip or hostname matches a substring
if (any and any not in line_id and
len([x for x in line_hostname if any in x]) <= 0 and
any not in "{0!s}".format(line_ip)):
# "any" was provided but did not match either
# hostname, ip or machine_id
continue
else:
if machine_id:
if not substring and machine_id == line_id:
return [Machine(self.name, line_id,
hostname=line_hostname, ip=line_ip)]
if substring and machine_id not in line_id:
# do not append this machine!
continue
if hostname:
if substring:
h_match = len([x for x in line_hostname if hostname in x])
else:
h_match = hostname in line_hostname
if not h_match:
# do not append this machine!
continue
if ip and ip != line_ip:
# Do not append this machine!
continue
machines.append(Machine(self.name, line_id,
hostname=line_hostname,
ip=line_ip))
finally:
f.close()
return machines
def get_machine_id(self, hostname=None, ip=None):
"""
Returns the machine id for a given hostname or IP address.
If hostname and ip is given, the resolver should also check that the
hostname matches the IP. If it can check this and hostname and IP do
not match, then an Exception must be raised.
:param hostname: The hostname of the machine
:type hostname: basestring
:param ip: IP address of the machine
:type ip: netaddr
:return: The machine ID, which depends on the resolver
:rtype: basestring
"""
machines = self.get_machines()
for machine in machines:
h_match = not hostname or machine.has_hostname(hostname)
i_match = not ip or machine.has_ip(ip)
if h_match and i_match:
return machine.id
return
def load_config(self, config):
"""
This loads the configuration dictionary, which contains the necessary
information for the machine resolver to find and connect to the
machine store.
:param config: The configuration dictionary to run the machine resolver
:type config: dict
:return: None
"""
self.filename = config.get("filename")
if self.filename is None:
raise MachineResolverError("filename is missing!")
@classmethod
def get_config_description(cls):
description = {cls.type: {"config": {"filename": "string"}}}
return description
@staticmethod
def testconnection(params):
"""
Test if the given filename exists.
:param params:
:return:
"""
return False, "Not Implemented"
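# Hedged, illustrative input line (standard hosts-file format parsed by
# get_machines above):
#   192.168.0.1   gw.example.com   gw
# This would yield a Machine with machine id/ip "192.168.0.1" and hostnames
# ["gw.example.com", "gw"]; get_machine_id(hostname="gw") would then be
# expected to return "192.168.0.1".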
| wheldom01/privacyidea | privacyidea/lib/machines/hosts.py | Python | agpl-3.0 | 5,767 |
#!/usr/bin/env sage -python
import sys
from sage.all import *
if len(sys.argv) != 2:
print "Usage: %s <n>"%sys.argv[0]
print "Outputs the prime factorization of n."
sys.exit(1)
print factor(sage_eval(sys.argv[1]))
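# Hedged example run (the factorization is printed in Sage's factor() format):
#   $ sage -python test.py 360
#   2^3 * 3^2 * 5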
| ctorney/socialInfluence | test.py | Python | mit | 231 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2014 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from functools import partial
import time
from PyQt5.QtCore import (
QUrl,
QUrlQuery,
)
from PyQt5.QtNetwork import QNetworkRequest
from picard import (
config,
log,
)
from picard.const import (
MUSICBRAINZ_OAUTH_CLIENT_ID,
MUSICBRAINZ_OAUTH_CLIENT_SECRET,
)
from picard.util import (
build_qurl,
load_json,
)
class OAuthManager(object):
def __init__(self, webservice):
self.webservice = webservice
self.setting = config.setting
self.persist = config.persist
@property
def host(self):
return self.setting['server_host']
@property
def port(self):
return self.setting['server_port']
@property
def refresh_token(self):
return self.persist["oauth_refresh_token"]
@refresh_token.setter
def refresh_token(self, value):
self.persist["oauth_refresh_token"] = value
@refresh_token.deleter
def refresh_token(self):
self.persist.remove("oauth_refresh_token")
@property
def refresh_token_scopes(self):
return self.persist["oauth_refresh_token_scopes"]
@refresh_token_scopes.setter
def refresh_token_scopes(self, value):
self.persist["oauth_refresh_token_scopes"] = value
@refresh_token_scopes.deleter
def refresh_token_scopes(self):
self.persist.remove("oauth_refresh_token_scopes")
@property
def access_token(self):
return self.persist["oauth_access_token"]
@access_token.setter
def access_token(self, value):
self.persist["oauth_access_token"] = value
@access_token.deleter
def access_token(self):
self.persist.remove("oauth_access_token")
@property
def access_token_expires(self):
return self.persist["oauth_access_token_expires"]
@access_token_expires.setter
def access_token_expires(self, value):
self.persist["oauth_access_token_expires"] = value
@access_token_expires.deleter
def access_token_expires(self):
self.persist.remove("oauth_access_token_expires")
@property
def username(self):
return self.persist["oauth_username"]
@username.setter
def username(self, value):
self.persist["oauth_username"] = value
def is_authorized(self):
return bool(self.refresh_token and self.refresh_token_scopes)
def is_logged_in(self):
return self.is_authorized() and bool(self.username)
def revoke_tokens(self):
        # TODO actually revoke the tokens on MB (I think it's not implemented there)
self.forget_refresh_token()
self.forget_access_token()
def forget_refresh_token(self):
del self.refresh_token
del self.refresh_token_scopes
def forget_access_token(self):
del self.access_token
del self.access_token_expires
def get_access_token(self, callback):
if not self.is_authorized():
callback(access_token=None)
else:
if self.access_token and time.time() < self.access_token_expires:
callback(access_token=self.access_token)
else:
self.forget_access_token()
self.refresh_access_token(callback)
def get_authorization_url(self, scopes):
params = {"response_type": "code", "client_id":
MUSICBRAINZ_OAUTH_CLIENT_ID, "redirect_uri":
"urn:ietf:wg:oauth:2.0:oob", "scope": scopes}
url = build_qurl(self.host, self.port, path="/oauth2/authorize",
queryargs=params)
return bytes(url.toEncoded()).decode()
def set_refresh_token(self, refresh_token, scopes):
log.debug("OAuth: got refresh_token %s with scopes %s", refresh_token, scopes)
self.refresh_token = refresh_token
self.refresh_token_scopes = scopes
def set_access_token(self, access_token, expires_in):
log.debug("OAuth: got access_token %s that expires in %s seconds", access_token, expires_in)
self.access_token = access_token
self.access_token_expires = int(time.time() + expires_in - 60)
def refresh_access_token(self, callback):
log.debug("OAuth: refreshing access_token with a refresh_token %s", self.refresh_token)
path = "/oauth2/token"
url = QUrl()
url_query = QUrlQuery()
url_query.addQueryItem("grant_type", "refresh_token")
url_query.addQueryItem("refresh_token", self.refresh_token)
url_query.addQueryItem("client_id", MUSICBRAINZ_OAUTH_CLIENT_ID)
url_query.addQueryItem("client_secret", MUSICBRAINZ_OAUTH_CLIENT_SECRET)
url.setQuery(url_query.query(QUrl.FullyEncoded))
data = url.query()
self.webservice.post(self.host, self.port, path, data,
partial(self.on_refresh_access_token_finished, callback),
mblogin=True, priority=True, important=True,
request_mimetype="application/x-www-form-urlencoded")
def on_refresh_access_token_finished(self, callback, data, http, error):
access_token = None
try:
if error:
log.error("OAuth: access_token refresh failed: %s", data)
if http.attribute(QNetworkRequest.HttpStatusCodeAttribute) == 400:
response = load_json(data)
if response["error"] == "invalid_grant":
self.forget_refresh_token()
else:
access_token = data["access_token"]
self.set_access_token(access_token, data["expires_in"])
except Exception as e:
log.error('OAuth: Unexpected error handling access token response: %r', e)
finally:
callback(access_token=access_token)
def exchange_authorization_code(self, authorization_code, scopes, callback):
log.debug("OAuth: exchanging authorization_code %s for an access_token", authorization_code)
path = "/oauth2/token"
url = QUrl()
url_query = QUrlQuery()
url_query.addQueryItem("grant_type", "authorization_code")
url_query.addQueryItem("code", authorization_code)
url_query.addQueryItem("client_id", MUSICBRAINZ_OAUTH_CLIENT_ID)
url_query.addQueryItem("client_secret", MUSICBRAINZ_OAUTH_CLIENT_SECRET)
url_query.addQueryItem("redirect_uri", "urn:ietf:wg:oauth:2.0:oob")
url.setQuery(url_query.query(QUrl.FullyEncoded))
data = url.query()
self.webservice.post(self.host, self.port, path, data,
partial(self.on_exchange_authorization_code_finished, scopes, callback),
mblogin=True, priority=True, important=True,
request_mimetype="application/x-www-form-urlencoded")
def on_exchange_authorization_code_finished(self, scopes, callback, data, http, error):
successful = False
try:
if error:
log.error("OAuth: authorization_code exchange failed: %s", data)
else:
self.set_refresh_token(data["refresh_token"], scopes)
self.set_access_token(data["access_token"], data["expires_in"])
successful = True
except Exception as e:
log.error('OAuth: Unexpected error handling authorization code response: %r', e)
finally:
callback(successful=successful)
def fetch_username(self, callback):
log.debug("OAuth: fetching username")
path = "/oauth2/userinfo"
self.webservice.get(self.host, self.port, path,
partial(self.on_fetch_username_finished, callback),
mblogin=True, priority=True, important=True)
def on_fetch_username_finished(self, callback, data, http, error):
successful = False
try:
if error:
log.error("OAuth: username fetching failed: %s", data)
else:
self.username = data["sub"]
log.debug("OAuth: got username %s", self.username)
successful = True
except Exception as e:
log.error('OAuth: Unexpected error handling username fetch response: %r', e)
finally:
callback(successful)
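# The two token requests above (refresh_access_token and exchange_authorization_code)
# both form-encode their bodies with QUrlQuery. Below is a minimal sketch of that
# encoding step, assuming PyQt5 is available; the helper name and the credential
# strings are hypothetical placeholders, not part of Picard's API.
from PyQt5.QtCore import QUrl, QUrlQuery
def build_refresh_body(refresh_token, client_id, client_secret):
    # Assemble the same form fields that refresh_access_token() posts above.
    url = QUrl()
    query = QUrlQuery()
    query.addQueryItem("grant_type", "refresh_token")
    query.addQueryItem("refresh_token", refresh_token)
    query.addQueryItem("client_id", client_id)
    query.addQueryItem("client_secret", client_secret)
    url.setQuery(query.query(QUrl.FullyEncoded))
    # This string is what gets POSTed with the application/x-www-form-urlencoded mimetype.
    return url.query()
if __name__ == "__main__":
    print(build_refresh_body("example-refresh-token", "example-client-id", "example-secret"))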
| antlarr/picard | picard/oauth.py | Python | gpl-2.0 | 9,132 |
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtCore import Qt
from PyQt5.QtQuickWidgets import QQuickWidget
from ninja_ide.core import settings
from ninja_ide.tools import ui_tools
class Notification(QWidget):
"""Notification class with the Logic for the QML UI"""
def __init__(self, parent=None):
super(Notification, self).__init__(None, Qt.ToolTip)
self._parent = parent
self._duration = 1800
self._text = ""
self._running = False
self.setAttribute(Qt.WA_TranslucentBackground, True)
self.setAttribute(Qt.WA_TransparentForMouseEvents)
self.setAttribute(Qt.WA_ShowWithoutActivating)
self.setFixedHeight(30)
# Create the QML user interface.
view = QQuickWidget()
view.setClearColor(Qt.transparent)
view.setResizeMode(QQuickWidget.SizeRootObjectToView)
view.setSource(ui_tools.get_qml_resource("Notification.qml"))
self._root = view.rootObject()
vbox = QVBoxLayout(self)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
vbox.addWidget(view)
self._root.close.connect(self.close)
self._parent.goingDown.connect(self.close)
def set_parent(self, parent):
self._parent = parent
def hideEvent(self, event):
super().hideEvent(event)
self._running = False
def showEvent(self, event):
"""Method takes an event to show the Notification"""
super(Notification, self).showEvent(event)
width, pgeo = self._parent.width(), self._parent.geometry()
conditional_vertical = settings.NOTIFICATION_POSITION in (0, 1)
conditional_horizontal = settings.NOTIFICATION_POSITION in (0, 2)
x = pgeo.left() if conditional_horizontal else pgeo.right()
y = (pgeo.bottom() - self.height() + 1
if conditional_vertical else pgeo.top())
self.setFixedWidth(width)
self.setGeometry(x, y, self.width(), self.height())
        # Derive the foreground color by mapping each hex digit of the (lower-cased)
        # background color to its 15's complement, i.e. a rough color inversion.
        background_color = str(settings.NOTIFICATION_COLOR).lower()
        inversion_table = str.maketrans('0123456789abcdef', 'fedcba9876543210')
        foreground_color = background_color.translate(inversion_table)
self._root.setColor(background_color, foreground_color)
self._root.start(self._duration)
def set_message(self, text='', duration=1800):
"""Method that takes str text and int duration to setup Notification"""
self._text = text
if self._running:
self._root.updateText(self._text)
else:
self._root.setText(self._text)
self._duration = duration
self._running = True
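# showEvent above derives the notification's foreground color by complementing each hex
# digit of the background color. A standalone sketch of that maketrans/translate trick
# (pure Python, no Qt required); the function name is a hypothetical illustration.
def invert_hex_color(color):
    """Map every hex digit to its 15's complement, giving a rough inverse color."""
    table = str.maketrans('0123456789abcdef', 'fedcba9876543210')
    return color.lower().translate(table)
if __name__ == "__main__":
    assert invert_hex_color("#000000") == "#ffffff"
    assert invert_hex_color("#AEC187") == "#513e78"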
| ninja-ide/ninja-ide | ninja_ide/gui/notification.py | Python | gpl-3.0 | 3,469 |
"""
Test scenarios for the crowdsource hinter xblock.
"""
import json
import unittest
from nose.plugins.attrib import attr
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from django.conf import settings
class TestCrowdsourceHinter(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Create the test environment with the crowdsourcehinter xblock.
"""
STUDENTS = [
{'email': 'view@test.com', 'password': 'foo'},
{'email': 'view2@test.com', 'password': 'foo'}
]
XBLOCK_NAMES = ['crowdsourcehinter']
@classmethod
def setUpClass(cls):
# Nose runs setUpClass methods even if a class decorator says to skip
# the class: https://github.com/nose-devs/nose/issues/946
# So, skip the test class here if we are not in the LMS.
if settings.ROOT_URLCONF != 'lms.urls':
raise unittest.SkipTest('Test only valid in lms')
super(TestCrowdsourceHinter, cls).setUpClass()
cls.course = CourseFactory.create(
display_name='CrowdsourceHinter_Test_Course'
)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.chapter = ItemFactory.create(
parent=cls.course, display_name='Overview'
)
cls.section = ItemFactory.create(
parent=cls.chapter, display_name='Welcome'
)
cls.unit = ItemFactory.create(
parent=cls.section, display_name='New Unit'
)
cls.xblock = ItemFactory.create(
parent=cls.unit,
category='crowdsourcehinter',
display_name='crowdsourcehinter'
)
cls.course_url = reverse(
'courseware_section',
kwargs={
'course_id': cls.course.id.to_deprecated_string(),
'chapter': 'Overview',
'section': 'Welcome',
}
)
def setUp(self):
super(TestCrowdsourceHinter, self).setUp()
for idx, student in enumerate(self.STUDENTS):
username = "u{}".format(idx)
self.create_account(username, student['email'], student['password'])
self.activate_user(student['email'])
self.staff_user = GlobalStaffFactory()
def get_handler_url(self, handler, xblock_name=None):
"""
Get url for the specified xblock handler
"""
if xblock_name is None:
xblock_name = TestCrowdsourceHinter.XBLOCK_NAMES[0]
return reverse('xblock_handler', kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(self.course.id.make_usage_key('crowdsourcehinter', xblock_name).
to_deprecated_string()),
'handler': handler,
'suffix': ''
})
def enroll_student(self, email, password):
"""
Student login and enroll for the course
"""
self.login(email, password)
self.enroll(self.course, verify=True)
def enroll_staff(self, staff):
"""
Staff login and enroll for the course
"""
email = staff.email
password = 'test'
self.login(email, password)
self.enroll(self.course, verify=True)
def initialize_database_by_id(self, handler, resource_id, times, xblock_name=None):
"""
        Call an ajax event (vote, delete, endorse) on a resource by its id
several times
"""
if xblock_name is None:
xblock_name = TestCrowdsourceHinter.XBLOCK_NAMES[0]
url = self.get_handler_url(handler, xblock_name)
for _ in range(times):
self.client.post(url, json.dumps({'id': resource_id}), '')
def call_event(self, handler, resource, xblock_name=None):
"""
        Call an ajax event (add, edit, flag, etc.) by specifying the resource
it takes
"""
if xblock_name is None:
xblock_name = TestCrowdsourceHinter.XBLOCK_NAMES[0]
url = self.get_handler_url(handler, xblock_name)
return self.client.post(url, json.dumps(resource), '')
def check_event_response_by_element(self, handler, resource, resp_key, resp_val, xblock_name=None):
"""
Call the event specified by the handler with the resource, and check
whether the element (resp_key) in response is as expected (resp_val)
"""
if xblock_name is None:
xblock_name = TestCrowdsourceHinter.XBLOCK_NAMES[0]
resp = self.call_event(handler, resource, xblock_name)
self.assertEqual(resp[resp_key], resp_val)
self.assert_request_status_code(200, self.course_url)
@attr('shard_1')
class TestHinterFunctions(TestCrowdsourceHinter):
"""
Check that the essential functions of the hinter work as expected.
Tests cover the basic process of receiving a hint, adding a new hint,
and rating/reporting hints.
"""
def test_get_hint_with_no_hints(self):
"""
Check that a generic statement is returned when no default/specific hints exist
"""
result = self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'}, 'crowdsourcehinter')
expected = {'BestHint': 'Sorry, there are no hints for this answer.', 'StudentAnswer': 'incorrect answer 1',
'HintCategory': False}
self.assertEqual(json.loads(result.content), expected)
def test_add_new_hint(self):
"""
Test the ability to add a new specific hint
"""
self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
data = {'new_hint_submission': 'new hint for answer 1', 'answer': 'incorrect answer 1'}
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
result = self.call_event('add_new_hint', data)
expected = {'success': True,
'result': 'Hint added'}
self.assertEqual(json.loads(result.content), expected)
def test_get_hint(self):
"""
Check that specific hints are returned
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
result = self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
expected = {'BestHint': 'new hint for answer 1', 'StudentAnswer': 'incorrect answer 1',
'HintCategory': 'ErrorResponse'}
self.assertEqual(json.loads(result.content), expected)
def test_rate_hint_upvote(self):
"""
Test hint upvoting
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
data = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1',
'student_rating': 'upvote'
}
expected = {'success': True}
result = self.call_event('rate_hint', data)
self.assertEqual(json.loads(result.content), expected)
def test_rate_hint_downvote(self):
"""
Test hint downvoting
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
data = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1',
'student_rating': 'downvote'
}
expected = {'success': True}
result = self.call_event('rate_hint', data)
self.assertEqual(json.loads(result.content), expected)
def test_report_hint(self):
"""
Test hint reporting
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
data = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1',
'student_rating': 'report'
}
expected = {'rating': 'reported', 'hint': 'new hint for answer 1'}
result = self.call_event('rate_hint', data)
self.assertEqual(json.loads(result.content), expected)
def test_dont_show_reported_hint(self):
"""
        Check that reported hints are not returned
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
data = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1',
'student_rating': 'report'
}
self.call_event('rate_hint', data)
result = self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
expected = {'BestHint': 'Sorry, there are no hints for this answer.', 'StudentAnswer': 'incorrect answer 1',
'HintCategory': False}
self.assertEqual(json.loads(result.content), expected)
def test_get_used_hint_answer_data(self):
"""
        Check that hint/answer information from previous submissions is returned upon correctly
answering the problem
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
self.call_event('get_used_hint_answer_data', "")
submission = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission)
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
result = self.call_event('get_used_hint_answer_data', "")
expected = {'new hint for answer 1': 'incorrect answer 1'}
self.assertEqual(json.loads(result.content), expected)
def test_show_best_hint(self):
"""
Check that the most upvoted hint is shown
"""
self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
submission1 = {'new_hint_submission': 'new hint for answer 1',
'answer': 'incorrect answer 1'}
submission2 = {'new_hint_submission': 'new hint for answer 1 to report',
'answer': 'incorrect answer 1'}
self.call_event('add_new_hint', submission1)
self.call_event('add_new_hint', submission2)
data_upvote = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1 to report',
'student_rating': 'upvote'
}
self.call_event('rate_hint', data_upvote)
data_downvote = {
'student_answer': 'incorrect answer 1',
'hint': 'new hint for answer 1 to report',
'student_rating': 'report'
}
self.call_event('rate_hint', data_downvote)
result = self.call_event('get_hint', {'submittedanswer': 'ans=incorrect+answer+1'})
expected = {'BestHint': 'new hint for answer 1', 'StudentAnswer': 'incorrect answer 1',
'HintCategory': 'ErrorResponse'}
self.assertEqual(json.loads(result.content), expected)
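# The tests above always submit answers in form-encoded shape ('ans=incorrect+answer+1')
# and expect the decoded text back under 'StudentAnswer'. Below is a small sketch of that
# decoding step; whether the hinter xblock uses parse_qs exactly like this is an
# assumption, and the helper name is hypothetical.
try:
    from urllib.parse import parse_qs  # Python 3
except ImportError:
    from urlparse import parse_qs  # Python 2
def decode_submitted_answer(raw):
    # 'ans=incorrect+answer+1' -> 'incorrect answer 1'
    return parse_qs(raw).get('ans', [''])[0]
if __name__ == "__main__":
    assert decode_submitted_answer('ans=incorrect+answer+1') == 'incorrect answer 1'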
| Learningtribes/edx-platform | openedx/tests/xblock_integration/test_crowdsource_hinter.py | Python | agpl-3.0 | 12,130 |
import datetime
import random
from colour import Color
from cue_csgo.helpers import color_gradient
class BaseRender(object):
require_color_info = False
def __init__(self, keyboard, settings, require_color_info=None):
self.keyboard = keyboard
self.settings = settings
if require_color_info is not None:
self.require_color_info = require_color_info
def render(self, game_state):
raise NotImplementedError
class BackgroundRender(BaseRender):
def render(self, game_state):
team = game_state["player"]["team"]
if team == "CT":
background_color = Color(self.settings["ct_color"])
if team == "T":
background_color = Color(self.settings["t_color"])
for x in self.keyboard.all_leds:
yield x, background_color
class HpRender(BaseRender):
def render(self, game_state):
hp = game_state["player"]["state"]["health"]
for x in range(20):
if x < hp//5:
yield self.keyboard.hp_bars[x], Color("red")
class WeaponRender(BaseRender):
def render(self, game_state):
active_color = Color("#00FF00")
empty_clip_color = Color("#FF0000")
weapons = game_state["player"]["weapons"]
for key, weapon in weapons.items():
try:
if weapon["type"] == "Knife":
yield 16, active_color
if weapon["type"] == "Pistol":
yield 15, color_gradient(empty_clip_color.hex, active_color.hex, weapon["ammo_clip_max"]+1)[weapon["ammo_clip"]]
if weapon["type"] in ["Rifle", "Shotgun", "Machine Gun", "Submachine Gun", "SniperRifle"]:
yield 14, color_gradient(empty_clip_color.hex, active_color.hex, weapon["ammo_clip_max"]+1)[weapon["ammo_clip"]]
if weapon["type"] == "C4":
yield 18, active_color
if weapon["type"] == "Grenade":
yield 17, active_color
if weapon["name"] == "weapon_hegrenade":
yield 19, active_color
if weapon["name"] == "weapon_flashbang":
yield 20, active_color
if weapon["name"] == "weapon_smokegrenade":
yield 21, active_color
if weapon["name"] == "weapon_decoy":
yield 22, active_color
if weapon["name"] in ["weapon_molotov", "weapon_incgrenade"]:
yield 23, active_color
except KeyError:
pass # Zeus
class BombRender(BaseRender):
def __init__(self, *args, **kwargs):
super(BombRender, self).__init__(*args, **kwargs)
self.beep_till = None
self.sleep_till = None
self.bomb_timer = None
def _blink_timeout(self, sec):
beep_distance_start = 0.83
beep_distance_end = 0.05
return beep_distance_start - (sec / (((self.settings["explode_time"] - 1) / beep_distance_start) + beep_distance_end))
def render(self, game_state):
now = datetime.datetime.now()
if self.bomb_timer is None:
try:
assert game_state["round"]["bomb"] == "planted" # Check if bomb is planted
self.bomb_timer = datetime.datetime.now()
except (KeyError, AssertionError):
pass
else:
try:
assert game_state["round"]["bomb"] == "planted" # Check if bomb still is planted, else stop
# noinspection PyTypeChecker
                if (now-self.bomb_timer).total_seconds() > self.settings["explode_time"] - 2.3:  # roughly the last 2.3 seconds: display yellowish
for x in self.keyboard.keypad:
yield x, Color("#AEC187")
else:
if self.beep_till is not None:
if self.beep_till < now:
self.beep_till = None
time_out = self._blink_timeout((now - self.bomb_timer).total_seconds())
self.sleep_till = now + datetime.timedelta(seconds=time_out)
else:
for x in self.keyboard.keypad:
yield x, Color("Red")
elif self.sleep_till is not None:
if self.sleep_till < now:
self.sleep_till = None
self.beep_till = now + datetime.timedelta(seconds=0.135) # Beep length ~ 0.135-0.140
else:
self.beep_till = now + datetime.timedelta(seconds=0.135) # Beep length ~ 0.135-0.140
except (KeyError, AssertionError):
self.bomb_timer = None
self.sleep_till = None
self.beep_till = None
class FlashbangRender(BaseRender):
require_color_info = True
def render(self, game_state, keyboard_color=None):
flashed_value = int(game_state["player"]["state"]["flashed"])
if self.settings["gradient"]:
for key, org_color in keyboard_color.items():
if flashed_value > 250:
yield key, Color("white") # Removes redundant calculations
elif flashed_value > 10:
yield key, color_gradient(org_color.hex, "white", 16)[flashed_value//16]
else:
for key, org_color in keyboard_color.items():
if flashed_value > 50:
yield key, Color("white")
class SmokeRender(BaseRender):
require_color_info = True
def render(self, game_state, keyboard_color=None):
smoked_value = int(game_state["player"]["state"]["smoked"])
if self.settings["gradient"]:
for key, org_color in keyboard_color.items():
if smoked_value > 250:
yield key, Color("Grey") # Removes redundant calculations
elif smoked_value > 10:
yield key, color_gradient(org_color.hex, "Grey", 16)[smoked_value//16]
else:
for key, org_color in keyboard_color.items():
if smoked_value > 50:
yield key, Color("Grey")
class FireRender(BaseRender):
def __init__(self, *args, **kwargs):
super(FireRender, self).__init__(*args, **kwargs)
leds = self.keyboard.device.led_positions()["pLedPosition"]
self.lowest_row = [x for x, y in leds.items() if y["top"] > 125]
self.second_lowest_row = [x for x, y in leds.items() if 125 > y["top"] > 105]
self.third_lowest_row = [x for x, y in leds.items() if 105 > y["top"] > 87]
self.last_update = datetime.datetime.now()
self.last_update_dict = {}
def render(self, game_state):
burning_value = int(game_state["player"]["state"]["burning"])
if burning_value >= 255:
if self.last_update < datetime.datetime.now() - datetime.timedelta(microseconds=90000):
self.last_update_dict = {}
for key in self.lowest_row:
self.last_update_dict[key] = Color("red")
for key in self.second_lowest_row:
if random.randint(0, 100) > 40:
self.last_update_dict[key] = Color("red")
for key in self.third_lowest_row:
if random.randint(0, 100) > 80:
self.last_update_dict[key] = Color("red")
self.last_update = datetime.datetime.now()
for key, value in self.last_update_dict.items():
yield key, value
class ChatRender(BaseRender):
def render(self, game_state):
if game_state["player"]["activity"] == "textinput":
for x in self.keyboard.all_leds:
yield x, Color(self.settings["color"])
all_renders = (BackgroundRender, HpRender, WeaponRender, BombRender, FlashbangRender, SmokeRender, FireRender, ChatRender)
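# WeaponRender above picks colors by indexing into a gradient of ammo_clip_max + 1 steps
# with the current ammo_clip. The exact behavior of cue_csgo.helpers.color_gradient is not
# shown in this file, so this is only a sketch of an equivalent lookup built from
# colour.Color.range_to (Color is already imported above); the function name is hypothetical.
def ammo_color(ammo_clip, ammo_clip_max, empty_hex="#FF0000", full_hex="#00FF00"):
    # Build ammo_clip_max + 1 evenly spaced colors from "clip empty" to "clip full"
    # and return the one matching the current clip.
    gradient = list(Color(empty_hex).range_to(Color(full_hex), ammo_clip_max + 1))
    return gradient[ammo_clip]
if __name__ == "__main__":
    print(ammo_color(0, 30).hex)   # red when the clip is empty
    print(ammo_color(30, 30).hex)  # green when the clip is full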
| Fire-Proof/cue-csgo | cue_csgo/renders.py | Python | mit | 8,100 |
# Inspired from http://stackoverflow.com/a/8759188/817766
from threading import currentThread
from meteography.dataset import DataSet
_request_cache = {}
_installed_middleware = False
def get_dataset_cache():
assert _installed_middleware, 'RequestCacheMiddleware not loaded'
return _request_cache[currentThread()]
class DataSetCache(object):
def __init__(self):
self._cache = dict()
def __getitem__(self, key):
dataset = self._cache.get(key)
if dataset is None:
dataset = DataSet.open(key)
self._cache[key] = dataset
return dataset
def clear(self):
for dataset in self._cache.values():
dataset.close()
self._cache.clear()
class RequestCacheMiddleware(object):
def __init__(self):
global _installed_middleware
_installed_middleware = True
def process_request(self, request):
_request_cache[currentThread()] = DataSetCache()
def clean_cache(self):
t = currentThread()
cache = _request_cache.get(t)
if cache is not None:
cache.clear()
del _request_cache[t]
def process_response(self, request, response):
self.clean_cache()
return response
def process_exception(self, request, exception):
self.clean_cache()
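# The middleware above keys one DataSetCache per thread so concurrent requests never share
# open datasets. Below is a framework-free sketch of that thread-keyed pattern, using a
# plain dict as a stand-in for DataSet objects; the names are illustrative only.
from threading import Thread
def _worker(per_thread_caches, name):
    cache = per_thread_caches.setdefault(currentThread(), {})
    cache[name] = object()  # stand-in for DataSet.open(name)
    assert list(cache) == [name]  # each thread only ever sees its own entries
    del per_thread_caches[currentThread()]
if __name__ == "__main__":
    caches = {}
    threads = [Thread(target=_worker, args=(caches, "dataset-%d" % i)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert not caches  # every thread cleaned up its own cache, like process_response() does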
| rthouvenin/meteography | meteography/django/broadcaster/request_cache.py | Python | mit | 1,339 |
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.service import shares
from sahara.utils.openstack import manila
SHARE_SCHEMA = {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uuid"
},
"path": {
"type": ["string", "null"]
},
"access_level": {
"type": ["string", "null"],
"enum": ["rw", "ro"],
"default": "rw"
}
},
"additionalProperties": False,
"required": [
"id"
]
}
}
def check_shares(data):
if not data:
return
paths = (share.get('path') for share in data)
paths = [path for path in paths if path is not None]
if len(paths) != len(set(paths)):
raise ex.InvalidDataException(
_('Multiple shares cannot be mounted to the same path.'))
for path in paths:
if not path.startswith('/') or '\x00' in path:
raise ex.InvalidDataException(
                _('Paths must be absolute Linux paths starting with "/" '
                  'and may not contain nulls.'))
client = manila.client()
for share in data:
manila_share = manila.get_share(client, share['id'])
if not manila_share:
raise ex.InvalidReferenceException(
_("Requested share id %s does not exist.") % share['id'])
share_type = manila_share.share_proto
if share_type not in shares.SUPPORTED_SHARE_TYPES:
raise ex.InvalidReferenceException(
_("Requested share id %(id)s is of type %(type)s, which is "
"not supported by Sahara.")
% {"id": share['id'], "type": share_type})
| tellesnobrega/sahara | sahara/service/validations/shares.py | Python | apache-2.0 | 2,431 |
# loops through all fields in a shp/dbf and outputs moran's I, z-score, and p-value of each to a csv table
import os
import pysal
import csv
import time
import numpy as np
from osgeo import ogr
begin_time = time.clock()
#open shp
shp = "PATH.shp"
#gdal layer reader
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(shp, 0) # 0 means read-only. 1 means writeable.
layer = dataSource.GetLayer()
layerDefinition = layer.GetLayerDefn()
#open dbf
dbf = pysal.open('PATH.dbf','r')
# create spatial weight matrix
w = pysal.queen_from_shapefile(shp)
# csv write setup
out_table = r"PATH.csv"
writer = csv.writer(open(out_table, 'a'))
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
GetPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
print fieldName + " - " + fieldType+ " " + str(fieldWidth) + " " + str(GetPrecision)
print "_______"
y = np.array(dbf.by_col[fieldName])
mi = pysal.Moran(y, w, two_tailed=False)
print "I = %f" %mi.I
print "Z-score = %f" %mi.z_norm
print "p-value = %f" %mi.p_norm
I = mi.I
Z = mi.z_norm
P = mi.p_norm
row = [fieldName, I, Z, P]
writer.writerow(row)
print "_______"
print "_______"
end_time = time.clock()
print (end_time - begin_time)
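# For a quick self-contained check of the pysal.Moran call used above (legacy pysal 1.x
# API assumed, matching this script), here is a small synthetic example on a 3x3 lattice
# whose values increase smoothly, so Moran's I should come out positive:
w_demo = pysal.lat2W(3, 3)
y_demo = np.arange(9, dtype=float)
mi_demo = pysal.Moran(y_demo, w_demo, two_tailed=False)
print "demo I = %f, z-score = %f, p-value = %f" % (mi_demo.I, mi_demo.z_norm, mi_demo.p_norm)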
| jamaps/open_geo_scripts | morans_I.py | Python | mit | 1,544 |
from __future__ import unicode_literals
import json
import logging
import requests
from requests.exceptions import HTTPError
from requests_hawk import HawkAuth
# The Python client release process is documented here:
# https://treeherder.readthedocs.io/common_tasks.html#releasing-a-new-version-of-the-python-client
__version__ = '4.0.0'
logger = logging.getLogger(__name__)
class ValidatorMixin(object):
def validate(self, required_properties={}):
"""
Implement job object validation rules. If a rule fails to validate
raise TreeherderClientError
Classes using this mixin should implement a required_properties
        dict. The keys in this dict are the required keys in the structure
contained in self.data. Nested keys can be specified with the '.'
operator. Each key in required_properties should have a dict value
like so:
{
'len':optional, some int, max allowed len of property value
'type':optional, some data type, required type of property
value
'cb': some function reference, called with
list of keys, list of values, required_properties key
}
Example:
self.required_properties = {
'revision':{
'len':40, 'cb':self.validate_existence
},
'project':{
'cb':self.validate_existence
},
'job':{
'type':dict, 'cb':self.validate_existence
},
'job.job_guid':{
'len':50, 'cb':self.validate_existence
}
}
"""
required_properties = required_properties or self.required_properties
for prop in required_properties:
cb = required_properties[prop]['cb']
cb(prop.split('.'), required_properties[prop], prop)
def validate_existence(self, keys, values, property_key):
"""
This required_properties callback method confirms the following.
- The keys provided are found in required_properties
- The type of the values match the specified type
- The values are defined and less than the required len
if a len is specified
If any of these assertions fail TreeherderClientError is raised
"""
# missing keys
missing_keys = []
property_errors = ''
# get value
v = None
for index, k in enumerate(keys):
if index > 0:
try:
v = v[k]
except KeyError:
missing_keys.append(k)
else:
try:
v = self.data[k]
except KeyError:
missing_keys.append(k)
if missing_keys:
property_errors += ('\tThe required Property, {0}, is '
'missing\n'.format('.'.join(missing_keys)))
if not v:
property_errors += '\tValue not defined for {0}\n'.format(
property_key)
elif ('type' in values) and (not isinstance(v, values['type'])):
property_errors += ('\tThe value type, {0}, should be '
'{1}\n'.format(type(v), values['type']))
max_limit = values.get('len', None)
if v and max_limit and (len(v) > max_limit):
property_errors += ('\tValue length exceeds maximum {0} char '
'limit: {1}\n'.format(str(max_limit), str(v)))
if property_errors:
msg = ('{0} structure validation errors detected for property:{1}'
'\n{2}\n{3}\n'.format(
self.__class__.__name__, property_key, property_errors,
json.dumps(self.data)))
raise TreeherderClientError(msg, [])
class TreeherderData(object):
def __init__(self, data={}):
self.data = {}
if data:
self.data = data
else:
self.init_data()
def to_json(self):
return json.dumps(self.data)
class TreeherderJob(TreeherderData, ValidatorMixin):
PARSE_STATUSES = {'pending', 'parsed', 'error'}
def __init__(self, data={}):
super(TreeherderJob, self).__init__(data)
# Provide minimal json structure validation
self.required_properties = {
'revision': {'len': 40, 'cb': self.validate_existence},
'project': {'cb': self.validate_existence},
'job': {'type': dict, 'cb': self.validate_existence},
'job.job_guid': {'len': 50, 'cb': self.validate_existence}
}
def add_revision(self, revision):
self.data['revision'] = revision
def add_coalesced_guid(self, guids):
if guids:
self.data['superseded'].extend(guids)
def add_project(self, project):
self.data['project'] = project
def add_job_guid(self, guid):
self.data['job']['job_guid'] = guid
def add_job_name(self, name):
self.data['job']['name'] = name
def add_job_symbol(self, symbol):
self.data['job']['job_symbol'] = symbol
def add_group_name(self, name):
self.data['job']['group_name'] = name
def add_group_symbol(self, symbol):
self.data['job']['group_symbol'] = symbol
def add_description(self, desc):
self.data['job']['desc'] = desc
def add_product_name(self, name):
self.data['job']['product_name'] = name
def add_state(self, state):
self.data['job']['state'] = state
def add_result(self, result):
self.data['job']['result'] = result
def add_reason(self, reason):
self.data['job']['reason'] = reason
def add_who(self, who):
self.data['job']['who'] = who
def add_submit_timestamp(self, tstamp):
self.data['job']['submit_timestamp'] = tstamp
def add_start_timestamp(self, tstamp):
self.data['job']['start_timestamp'] = tstamp
def add_end_timestamp(self, tstamp):
self.data['job']['end_timestamp'] = tstamp
def add_machine(self, machine):
self.data['job']['machine'] = machine
def add_build_info(self, os_name, platform, arch):
self.data['job']['build_platform']['os_name'] = os_name
self.data['job']['build_platform']['platform'] = platform
self.data['job']['build_platform']['architecture'] = arch
def add_machine_info(self, os_name, platform, arch):
self.data['job']['machine_platform']['os_name'] = os_name
self.data['job']['machine_platform']['platform'] = platform
self.data['job']['machine_platform']['architecture'] = arch
def add_option_collection(self, option_collection):
if option_collection:
self.data['job']['option_collection'].update(option_collection)
def add_tier(self, tier):
self.data['job']['tier'] = tier
def add_log_reference(self, name, url, parse_status='pending'):
"""
parse_status - one of 'pending', 'parsed' or 'error'
"""
if parse_status not in self.PARSE_STATUSES:
msg = "{0}: Invalid parse_status '{1}': must be one of: {2}".format(
self.__class__.__name__,
parse_status,
', '.join(self.PARSE_STATUSES)
)
raise TreeherderClientError(msg, [])
if name and url:
self.data['job']['log_references'].append(
{'url': url, 'name': name, 'parse_status': parse_status}
)
def add_artifact(self, name, artifact_type, blob):
if blob:
self.data['job']['artifacts'].append({
'name': name,
'type': artifact_type,
'blob': blob,
'job_guid': self.data['job']['job_guid']
})
def init_data(self):
self.data = {
'revision': '',
'project': '',
'job': {
# Stored in project_jobs_1.job.job_guid
'job_guid': '',
# Stored in treeherder_reference_1.job_type.name
'name': '',
# Stored in treeherder_reference_1.job_type.name
'desc': '',
                # Stored symbol representing the job in the UI
# Stored in treeherder_reference_1.job_type.symbol
'job_symbol': '',
# human readable group name (can be null)
# Stored in treeherder_reference_1.job_group.name
'group_name': '',
# Stored symbol representing the job group (can be null)
# Stored in treeherder_reference_1.job_group.symbol
'group_symbol': '',
# Stored in treeherder_reference_1.product
'product_name': '',
# Stored in project_jobs_1.job.state
'state': '',
# Stored in project_jobs_1.job.result
'result': '',
# Stored in project_jobs_1.job.reason
'reason': '',
# Stored in project_jobs_1.job.who
'who': '',
# Stored in project_jobs_1.job.submit_timestamp
'submit_timestamp': '',
# Stored in project_jobs_1.job.start_timestamp
'start_timestamp': '',
# Stored in project_jobs_1.job.end_timestamp
'end_timestamp': '',
# Stored in treeherder_reference_1.machine.name
'machine': '',
# Stored in
# treeherder_reference_1.build_platform.os_name,
# treeherder_reference_1.build_platform.platform,
# treeherder_reference_1.build_platform.architecture,
'build_platform': {
'os_name': '', 'platform': '', 'architecture': ''},
# Stored in:
# treeherder_reference_1.machine_platform.os_name,
# treeherder_reference_1.machine_platform.platform,
# treeherder_reference_1.machine_platform.architecture,
'machine_platform': {
'os_name': '', 'platform': '', 'architecture': ''},
# Stored in treeherder_reference_1.option_collection and
# treeherder_reference_1.option
# Ex: 'debug | pgo | asan | opt': True
'option_collection': {},
# Stored in treeherder_reference_1.job_log_url
# Example:
# log_references: [
# { url: 'http://ftp.mozilla.org/mozilla.org/firefox.gz',
# name: 'unittest' },
'log_references': [],
# Stored in
# project_jobs_1.job_artifact.name
# project_jobs_1.job_artifact.type
# project_jobs_1.job_artifact.blob
'artifacts': []
},
# List of job_guids that were superseded by this job
# Stored in project_jobs_1.job.coalesced_job_guid
# Where the value of coalesced_job_guid is set to job_guid
# for the list of job_guids provided in superseded
'superseded': []
}
class TreeherderCollection(object):
"""
Base class for treeherder data collections
"""
def __init__(self, endpoint_base, data=[]):
self.data = []
self.endpoint_base = endpoint_base
if data:
self.data = data
def get_collection_data(self):
"""
Build data structure containing the data attribute only for
each item in the collection
"""
data_struct = []
for datum_instance in self.data:
data_struct.append(datum_instance.data)
return data_struct
def to_json(self):
"""
Convert list of data objects to json
"""
return json.dumps(self.get_collection_data())
def add(self, datum_instance):
"""
Add a data structure class instance to data list
"""
self.data.append(datum_instance)
def validate(self):
"""
validate the data structure class
"""
for d in self.data:
d.validate()
def get_chunks(self, chunk_size):
"""
Return a generator of new collections broken into chunks of size ``chunk_size``.
Each chunk will be a ``TreeherderCollection`` of the same
type as the original with a max of ``chunk_size`` count of
``TreeherderData`` objects.
Each collection must then be POSTed individually.
"""
for i in range(0, len(self.data), chunk_size):
# we must copy not only the data chunk,
# but also the endpoint_base or any other field of the
# collection. In the case of a TreeherderJobCollection,
# this is determined in the constructor.
chunk = self.__class__(self.data[i:i + chunk_size])
chunk.endpoint_base = self.endpoint_base
yield chunk
class TreeherderJobCollection(TreeherderCollection):
"""
Collection of job objects
"""
def __init__(self, data=[]):
super(TreeherderJobCollection, self).__init__('jobs', data)
def get_job(self, data={}):
return TreeherderJob(data)
class TreeherderClient(object):
"""
Treeherder client class
"""
API_VERSION = '1.0'
REQUEST_HEADERS = {
'Accept': 'application/json; version={}'.format(API_VERSION),
'User-Agent': 'treeherder-pyclient/{}'.format(__version__),
}
PUSH_ENDPOINT = 'push'
JOBS_ENDPOINT = 'jobs'
JOB_DETAIL_ENDPOINT = 'jobdetail'
JOB_LOG_URL_ENDPOINT = 'job-log-url'
OPTION_COLLECTION_HASH_ENDPOINT = 'optioncollectionhash'
REPOSITORY_ENDPOINT = 'repository'
JOBGROUP_ENDPOINT = 'jobgroup'
JOBTYPE_ENDPOINT = 'jobtype'
PRODUCT_ENDPOINT = 'product'
MACHINE_ENDPOINT = 'machine'
MACHINE_PLATFORM_ENDPOINT = 'machineplatform'
FAILURE_CLASSIFICATION_ENDPOINT = 'failureclassification'
BUILD_PLATFORM_ENDPOINT = 'buildplatform'
MAX_COUNT = 2000
def __init__(self, server_url='https://treeherder.mozilla.org',
timeout=30, client_id=None, secret=None):
"""
:param server_url: The site URL of the Treeherder instance (defaults to production)
:param timeout: maximum time it can take for a request to complete
:param client_id: the Treeherder API credentials client ID
:param secret: the Treeherder API credentials secret
"""
self.server_url = server_url
self.timeout = timeout
# Using a session gives us automatic keep-alive/connection pooling.
self.session = requests.Session()
self.session.headers.update(self.REQUEST_HEADERS)
if client_id and secret:
self.session.auth = HawkAuth(id=client_id, key=secret)
def _get_endpoint_url(self, endpoint, project=None):
if project:
return '{}/api/project/{}/{}/'.format(self.server_url, project, endpoint)
return '{}/api/{}/'.format(self.server_url, endpoint)
def _get_json_list(self, endpoint, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
total = None if params["count"] is None else params["count"]
count = self.MAX_COUNT
offset = 0
data = []
while True:
params["count"] = count
params["offset"] = offset
new_data = self._get_json(endpoint, project=project, **params)["results"]
data += new_data
if len(new_data) < self.MAX_COUNT:
return data
offset += count
if total is not None:
count = min(total-offset, self.MAX_COUNT)
else:
return self._get_json(endpoint, project=project, **params)["results"]
def _get_json(self, endpoint, project=None, **params):
url = self._get_endpoint_url(endpoint, project=project)
resp = self.session.get(url, params=params, timeout=self.timeout)
try:
resp.raise_for_status()
except HTTPError:
logger.error("HTTPError %s requesting %s: %s",
resp.status_code, resp.request.url, resp.content)
logger.debug("Request headers: %s", resp.request.headers)
logger.debug("Response headers: %s", resp.headers)
raise
return resp.json()
def _post_json(self, project, endpoint, data):
url = self._get_endpoint_url(endpoint, project=project)
resp = self.session.post(url, json=data, timeout=self.timeout)
try:
resp.raise_for_status()
return resp
except HTTPError:
logger.error("HTTPError %s submitting to %s: %s",
resp.status_code, resp.request.url, resp.content)
logger.debug("Request headers: %s", resp.request.headers)
logger.debug("Request body: %s", resp.request.body)
logger.debug("Response headers: %s", resp.headers)
raise
def get_option_collection_hash(self):
"""
Gets option collection hash, a mapping of hash values to build properties
Returns a dictionary with the following structure:
{
hashkey1: [ { key: value }, { key: value }, ... ],
hashkey2: [ { key: value }, { key: value }, ... ],
...
}
"""
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
ret = {}
for result in resp:
ret[result['option_collection_hash']] = result['options']
return ret
def get_repositories(self):
"""
Gets a list of valid treeherder repositories.
Returns a list with the following structure:
[
{name: repository-name, dvcs_type: dcvs-type, ...},
...
]
"""
return self._get_json(self.REPOSITORY_ENDPOINT)
def get_products(self):
"""
Get a list of treeherder products.
Returns a list with the following structure:
{
id: <id>,
name: <name>,
description: <description>
}
"""
return self._get_json(self.PRODUCT_ENDPOINT)
def get_job_groups(self):
"""
Gets a list of job groups stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
symbol: <symbol>,
name: <name>
...
}
"""
return self._get_json(self.JOBGROUP_ENDPOINT)
def get_failure_classifications(self):
"""
Gets a list of failure classification types stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
name: <name>,
description: <description>
}
"""
return self._get_json(self.FAILURE_CLASSIFICATION_ENDPOINT)
def get_build_platforms(self):
"""
Gets a list of build platforms stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
os_name: <os_name>,
platform: <platform>,
architecture: <architecture>
}
"""
return self._get_json(self.BUILD_PLATFORM_ENDPOINT)
def get_job_types(self):
"""
Gets a list of job types stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
symbol: <symbol>
name: <name>
...
}
"""
return self._get_json(self.JOBTYPE_ENDPOINT)
def get_machines(self):
"""
Gets a list of machines stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>,
name: <name>,
first_timestamp: <first_timestamp>,
last_timestamp: <last_timestamp>
}
"""
return self._get_json(self.MACHINE_ENDPOINT)
def get_machine_platforms(self):
"""
Gets a list of machine platforms stored inside Treeherder
Returns a list of dictionaries with the following properties:
{
id: <id>
os_name: <os_name>
platform: <platform>,
architecture: <architecture>
}
"""
return self._get_json(self.MACHINE_PLATFORM_ENDPOINT)
def get_pushes(self, project, **params):
"""
Gets pushes from project, filtered by parameters
By default this method will just return the latest 10 pushes (if they exist)
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.PUSH_ENDPOINT, project, **params)
def get_resultsets(self, project, **params):
"""
Returns get_pushes for backwards compatibility
"""
logger.warn("DEPRECATED: TreeherderClient.get_resultsets(), please use get_pushes() instead.")
return self.get_pushes(project)
def get_jobs(self, project, **params):
"""
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOBS_ENDPOINT, project, **params)
def get_job_details(self, **params):
"""
        Gets job details, filtered by parameters
Typically you would filter by `job_guid`. Example:
details = client.get_job_details(job_guid='22fb7e6b-d4e7-43cb-a268-c897c1112c0f/0')
:param params: keyword arguments to filter results
"""
return self._get_json_list(self.JOB_DETAIL_ENDPOINT, None,
**params)
def get_job_log_url(self, project, **params):
"""
Gets job log url, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
"""
return self._get_json(self.JOB_LOG_URL_ENDPOINT, project,
**params)
def post_collection(self, project, collection_inst):
"""
Sends a treeherder collection to the server
:param project: project to submit data for
:param collection_inst: a TreeherderCollection instance
"""
if not isinstance(collection_inst, TreeherderCollection):
msg = '{0} should be an instance of TreeherderCollection'.format(
type(collection_inst))
raise TreeherderClientError(msg, [])
if not collection_inst.endpoint_base:
msg = "{0}: collection endpoint_base property not defined".format(
self.__class__.__name__)
raise TreeherderClientError(msg, [])
if not collection_inst.data:
msg = "{0}: collection data property not defined".format(
self.__class__.__name__)
raise TreeherderClientError(msg, [])
collection_inst.validate()
return self._post_json(project, collection_inst.endpoint_base,
collection_inst.get_collection_data())
class TreeherderClientError(Exception):
def __init__(self, msg, Errors):
Exception.__init__(self, msg)
self.Errors = Errors
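# TreeherderCollection.get_chunks above splits a collection into POST-sized pieces. Below
# is a short offline sketch of that flow (no network traffic; the guid/project values are
# made up):
if __name__ == "__main__":
    collection = TreeherderJobCollection()
    for n in range(5):
        job = collection.get_job()
        job.add_project("example-project")
        job.add_job_guid("example-guid-%d" % n)
        collection.add(job)
    chunks = list(collection.get_chunks(chunk_size=2))
    print([len(chunk.data) for chunk in chunks])  # [2, 2, 1]
    print(chunks[0].endpoint_base)                # 'jobs'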
| tojon/treeherder | treeherder/client/thclient/client.py | Python | mpl-2.0 | 24,194 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
def combine_frames(
this: "DataFrame",
*args: Union["DataFrame", "Series"],
how: str = "full",
preserve_order_column: bool = False
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
    It returns a DataFrame that has prefix `this_` and `that_` to distinguish
    the column names from both DataFrames.
It internally performs a join operation which can be expensive in general.
So, if `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Tuple]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Tuple]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
def align_diff_frames(
resolve_func: Callable[["DataFrame", List[Tuple], List[Tuple]], Tuple["Series", Tuple]],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
Otherwise, it returns as are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply = []
this_columns_to_apply = []
additional_that_columns = []
columns_to_keep = []
column_labels_to_keep = []
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(F.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set)
column_labels_applied = list(column_labels_set)
else:
columns_applied = []
column_labels_applied = []
applied = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
) # type: DataFrame
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in an order of its original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
# After that, we will add the rest columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> spark.SparkSession:
if conf is None:
conf = dict()
builder = spark.SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(
pairs: Dict[str, Any], *, spark: Optional[spark.SparkSession] = None
) -> Iterator[None]:
"""
A convenient context manager to set `value` to the Spark SQL configuration `key` and
then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
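# Illustrative usage sketch (added; not part of the upstream module): `sql_conf`
# temporarily applies the given Spark SQL settings and restores the previous
# values (or unsets them) on exit. The configuration key below is only an example.
def _example_sql_conf_usage() -> None:
    with sql_conf({"spark.sql.shuffle.partitions": "4"}):
        pass  # Spark SQL / pandas-on-Spark operations here see the temporary value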
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, removes the ones that are not supported if they
are simply the default value (i.e. most likely the user didn't explicitly specify it). It
    throws a TypeError if the user explicitly specifies an argument that is not supported by the
pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Makes a copy since whatever passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
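# Illustrative sketch (added; not part of the upstream module): the decorated
# method runs once on first access, is cached under `_lazy_expensive`, and the
# cache can be cleared with `del instance.expensive`.
class _ExampleLazy:
    @lazy_property
    def expensive(self) -> int:
        return sum(range(1000))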
def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Tuple]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Union[Any, Tuple]]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if name is None:
name = ("__none__",)
elif is_list_like(name):
name = tuple([str(n) for n in name])
else:
name = (str(name),)
return ("(%s)" % ", ".join(name)) if len(name) > 1 else name[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
    Check whether the given tuple can be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
    Check whether the given value can be used as a name.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(
Dict[Optional[Union[int, str]], int], {None: none_axis, "index": 0, "columns": 1}
).get(axis, axis)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
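# Illustrative sketch (added; not part of the upstream module): string axis
# names are normalized to their numeric equivalents; anything else raises.
def _example_validate_axis() -> None:
    assert validate_axis("index") == 0
    assert validate_axis("columns") == 1
    assert validate_axis(None) == 0  # falls back to `none_axis`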
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
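# Illustrative sketch (added; not part of the upstream module): pandas-style
# 'outer' is normalized to Spark's 'full' join type, while 'inner' passes through.
def _example_validate_how() -> None:
    assert validate_how("outer") == "full"
    assert validate_how("inner") == "inner"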
@overload
def verify_temp_column_name(df: spark.DataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(
df: "DataFrame", column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
...
def verify_temp_column_name(
df: Union["DataFrame", spark.DataFrame], column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, spark.DataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: spark.Column, right: spark.Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(F.lit(0), F.lit(0))
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 1)
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore
def compare_null_first(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNull() | right.isNull() | comp(left, right)
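# Illustrative sketch (added; not part of the upstream module): a "nulls first"
# ascending comparison between two Spark columns, using a plain "<" comparator.
def _example_null_first_less_than(left: spark.Column, right: spark.Column) -> spark.Column:
    return compare_null_first(left, right, lambda l, r: l < r)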
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
maropu/spark
|
python/pyspark/pandas/utils.py
|
Python
|
apache-2.0
| 33,908
|
import datetime
import decimal
import itertools
import re
import time
import urllib2
import uuid
import warnings
from operator import itemgetter
try:
import dateutil
except ImportError:
dateutil = None
else:
import dateutil.parser
import pymongo
import gridfs
from bson import Binary, DBRef, SON, ObjectId
from mongoengine.errors import ValidationError
from mongoengine.python_support import (PY3, bin_type, txt_type,
str_types, StringIO)
from base import (BaseField, ComplexBaseField, ObjectIdField, GeoJsonBaseField,
get_document, BaseDocument)
from queryset import DO_NOTHING, QuerySet
from document import Document, EmbeddedDocument
from connection import get_db, DEFAULT_CONNECTION_NAME
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = [
'StringField', 'URLField', 'EmailField', 'IntField', 'LongField',
'FloatField', 'DecimalField', 'BooleanField', 'DateTimeField',
'ComplexDateTimeField', 'EmbeddedDocumentField', 'ObjectIdField',
'GenericEmbeddedDocumentField', 'DynamicField', 'ListField',
'SortedListField', 'EmbeddedDocumentListField', 'DictField',
'MapField', 'ReferenceField', 'CachedReferenceField',
'GenericReferenceField', 'BinaryField', 'GridFSError', 'GridFSProxy',
'FileField', 'ImageGridFsProxy', 'ImproperlyConfigured', 'ImageField',
'GeoPointField', 'PointField', 'LineStringField', 'PolygonField',
'SequenceField', 'UUIDField', 'MultiPointField', 'MultiLineStringField',
'MultiPolygonField', 'GeoJsonBaseField']
RECURSIVE_REFERENCE_CONSTANT = 'self'
class StringField(BaseField):
"""A unicode string field.
"""
def __init__(self, regex=None, max_length=None, min_length=None, **kwargs):
self.regex = re.compile(regex) if regex else None
self.max_length = max_length
self.min_length = min_length
super(StringField, self).__init__(**kwargs)
def to_python(self, value):
if isinstance(value, unicode):
return value
try:
value = value.decode('utf-8')
except:
pass
return value
def validate(self, value):
if not isinstance(value, basestring):
self.error('StringField only accepts string values')
if self.max_length is not None and len(value) > self.max_length:
self.error('String value is too long')
if self.min_length is not None and len(value) < self.min_length:
self.error('String value is too short')
if self.regex is not None and self.regex.match(value) is None:
self.error('String value did not match validation regex')
def lookup_member(self, member_name):
return None
def prepare_query_value(self, op, value):
if not isinstance(op, basestring):
return value
if op.lstrip('i') in ('startswith', 'endswith', 'contains', 'exact'):
flags = 0
if op.startswith('i'):
flags = re.IGNORECASE
op = op.lstrip('i')
regex = r'%s'
if op == 'startswith':
regex = r'^%s'
elif op == 'endswith':
regex = r'%s$'
elif op == 'exact':
regex = r'^%s$'
# escape unsafe characters which could lead to a re.error
value = re.escape(value)
value = re.compile(regex % value, flags)
return super(StringField, self).prepare_query_value(op, value)
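# Illustrative sketch (added; not part of the original module): a minimal
# document using StringField with regex and length constraints. `Article` is a
# hypothetical example class.
def _example_string_field():
    class Article(Document):
        title = StringField(regex=r'^[\w ]+$', max_length=120, required=True)
    return Article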
class URLField(StringField):
"""A field that validates input as an URL.
.. versionadded:: 0.3
"""
_URL_REGEX = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
# domain...
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, url_regex=None, **kwargs):
self.verify_exists = verify_exists
self.url_regex = url_regex or self._URL_REGEX
super(URLField, self).__init__(**kwargs)
def validate(self, value):
if not self.url_regex.match(value):
self.error('Invalid URL: %s' % value)
return
if self.verify_exists:
warnings.warn(
"The URLField verify_exists argument has intractable security "
"and performance issues. Accordingly, it has been deprecated.",
DeprecationWarning)
try:
request = urllib2.Request(value)
urllib2.urlopen(request)
except Exception, e:
self.error('This URL appears to be a broken link: %s' % e)
class EmailField(StringField):
"""A field that validates input as an E-Mail-Address.
.. versionadded:: 0.4
"""
EMAIL_REGEX = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'
        # domain (max length of an ICANN TLD is 22 characters)
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,253}[A-Z0-9])?\.)+[A-Z]{2,22}$', re.IGNORECASE
)
def validate(self, value):
if not EmailField.EMAIL_REGEX.match(value):
self.error('Invalid Mail-address: %s' % value)
super(EmailField, self).validate(value)
class IntField(BaseField):
"""An 32-bit integer field.
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(IntField, self).__init__(**kwargs)
def to_python(self, value):
try:
value = int(value)
except ValueError:
pass
return value
def validate(self, value):
try:
value = int(value)
except:
self.error('%s could not be converted to int' % value)
if self.min_value is not None and value < self.min_value:
self.error('Integer value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Integer value is too large')
def prepare_query_value(self, op, value):
if value is None:
return value
return super(IntField, self).prepare_query_value(op, int(value))
class LongField(BaseField):
"""An 64-bit integer field.
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(LongField, self).__init__(**kwargs)
def to_python(self, value):
try:
value = long(value)
except ValueError:
pass
return value
def validate(self, value):
try:
value = long(value)
except:
self.error('%s could not be converted to long' % value)
if self.min_value is not None and value < self.min_value:
self.error('Long value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Long value is too large')
def prepare_query_value(self, op, value):
if value is None:
return value
return super(LongField, self).prepare_query_value(op, long(value))
class FloatField(BaseField):
"""An floating point number field.
"""
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value, self.max_value = min_value, max_value
super(FloatField, self).__init__(**kwargs)
def to_python(self, value):
try:
value = float(value)
except ValueError:
pass
return value
def validate(self, value):
if isinstance(value, int):
value = float(value)
if not isinstance(value, float):
self.error('FloatField only accepts float values')
if self.min_value is not None and value < self.min_value:
self.error('Float value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Float value is too large')
def prepare_query_value(self, op, value):
if value is None:
return value
return super(FloatField, self).prepare_query_value(op, float(value))
class DecimalField(BaseField):
"""A fixed-point decimal number field.
.. versionchanged:: 0.8
.. versionadded:: 0.3
"""
def __init__(self, min_value=None, max_value=None, force_string=False,
precision=2, rounding=decimal.ROUND_HALF_UP, **kwargs):
"""
:param min_value: Validation rule for the minimum acceptable value.
:param max_value: Validation rule for the maximum acceptable value.
:param force_string: Store as a string.
:param precision: Number of decimal places to store.
:param rounding: The rounding rule from the python decimal library:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Defaults to: ``decimal.ROUND_HALF_UP``
"""
self.min_value = min_value
self.max_value = max_value
self.force_string = force_string
self.precision = precision
self.rounding = rounding
super(DecimalField, self).__init__(**kwargs)
def to_python(self, value):
if value is None:
return value
# Convert to string for python 2.6 before casting to Decimal
try:
value = decimal.Decimal("%s" % value)
except decimal.InvalidOperation:
return value
return value.quantize(decimal.Decimal(".%s" % ("0" * self.precision)), rounding=self.rounding)
def to_mongo(self, value, use_db_field=True):
if value is None:
return value
if self.force_string:
return unicode(value)
return float(self.to_python(value))
def validate(self, value):
if not isinstance(value, decimal.Decimal):
if not isinstance(value, basestring):
value = unicode(value)
try:
value = decimal.Decimal(value)
except Exception, exc:
self.error('Could not convert value to decimal: %s' % exc)
if self.min_value is not None and value < self.min_value:
self.error('Decimal value is too small')
if self.max_value is not None and value > self.max_value:
self.error('Decimal value is too large')
def prepare_query_value(self, op, value):
return super(DecimalField, self).prepare_query_value(op, self.to_mongo(value))
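# Illustrative sketch (added; not part of the original module): a price stored
# with two decimal places; `force_string=True` avoids float rounding in MongoDB
# at the cost of numeric range queries. `Product` is a hypothetical class.
def _example_decimal_field():
    class Product(Document):
        price = DecimalField(min_value=0, precision=2, force_string=True)
    return Product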
class BooleanField(BaseField):
"""A boolean field type.
.. versionadded:: 0.1.2
"""
def to_python(self, value):
try:
value = bool(value)
except ValueError:
pass
return value
def validate(self, value):
if not isinstance(value, bool):
self.error('BooleanField only accepts boolean values')
class DateTimeField(BaseField):
"""A datetime field.
    Uses the python-dateutil library if available, otherwise falls back to
    time.strptime to parse dates. Note: python-dateutil's parser is fully
    featured and, when installed, can convert a wide variety of date formats
    into valid python datetime objects.
Note: Microseconds are rounded to the nearest millisecond.
Pre UTC microsecond support is effectively broken.
Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
need accurate microsecond support.
"""
def validate(self, value):
new_value = self.to_mongo(value)
if not isinstance(new_value, (datetime.datetime, datetime.date)):
self.error(u'cannot parse date "%s"' % value)
def to_mongo(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if callable(value):
return value()
if not isinstance(value, basestring):
return None
# Attempt to parse a datetime:
if dateutil:
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError):
return None
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
return None
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(*time.strptime(value,
'%Y-%m-%d %H:%M:%S')[:6], **kwargs)
except ValueError:
try: # Try without seconds.
return datetime.datetime(*time.strptime(value,
'%Y-%m-%d %H:%M')[:5], **kwargs)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(*time.strptime(value,
'%Y-%m-%d')[:3], **kwargs)
except ValueError:
return None
def prepare_query_value(self, op, value):
return super(DateTimeField, self).prepare_query_value(op, self.to_mongo(value))
class ComplexDateTimeField(StringField):
"""
ComplexDateTimeField handles microseconds exactly instead of rounding
like DateTimeField does.
Derives from a StringField so you can do `gte` and `lte` filtering by
using lexicographical comparison when filtering / sorting strings.
The stored string has the following format:
YYYY,MM,DD,HH,MM,SS,NNNNNN
Where NNNNNN is the number of microseconds of the represented `datetime`.
The `,` as the separator can be easily modified by passing the `separator`
keyword when initializing the field.
.. versionadded:: 0.5
"""
def __init__(self, separator=',', **kwargs):
self.names = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
self.separator = separator
self.format = separator.join(['%Y', '%m', '%d', '%H', '%M', '%S', '%f'])
super(ComplexDateTimeField, self).__init__(**kwargs)
def _convert_from_datetime(self, val):
"""
Convert a `datetime` object to a string representation (which will be
stored in MongoDB). This is the reverse function of
`_convert_from_string`.
>>> a = datetime(2011, 6, 8, 20, 26, 24, 92284)
>>> ComplexDateTimeField()._convert_from_datetime(a)
'2011,06,08,20,26,24,092284'
"""
return val.strftime(self.format)
def _convert_from_string(self, data):
"""
Convert a string representation to a `datetime` object (the object you
will manipulate). This is the reverse function of
`_convert_from_datetime`.
>>> a = '2011,06,08,20,26,24,092284'
>>> ComplexDateTimeField()._convert_from_string(a)
datetime.datetime(2011, 6, 8, 20, 26, 24, 92284)
"""
values = map(int, data.split(self.separator))
return datetime.datetime(*values)
def __get__(self, instance, owner):
data = super(ComplexDateTimeField, self).__get__(instance, owner)
if data is None:
return None if self.null else datetime.datetime.now()
if isinstance(data, datetime.datetime):
return data
return self._convert_from_string(data)
def __set__(self, instance, value):
value = self._convert_from_datetime(value) if value else value
return super(ComplexDateTimeField, self).__set__(instance, value)
def validate(self, value):
value = self.to_python(value)
if not isinstance(value, datetime.datetime):
            self.error('Only datetime objects may be used in a '
'ComplexDateTimeField')
def to_python(self, value):
original_value = value
try:
return self._convert_from_string(value)
except:
return original_value
def to_mongo(self, value):
value = self.to_python(value)
return self._convert_from_datetime(value)
def prepare_query_value(self, op, value):
return super(ComplexDateTimeField, self).prepare_query_value(op, self._convert_from_datetime(value))
class EmbeddedDocumentField(BaseField):
"""An embedded document field - with a declared document_type.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
"""
def __init__(self, document_type, **kwargs):
if not isinstance(document_type, basestring):
if not issubclass(document_type, EmbeddedDocument):
self.error('Invalid embedded document class provided to an '
'EmbeddedDocumentField')
self.document_type_obj = document_type
super(EmbeddedDocumentField, self).__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def to_python(self, value):
if not isinstance(value, self.document_type):
return self.document_type._from_son(value)
return value
def to_mongo(self, value, use_db_field=True, fields=[]):
if not isinstance(value, self.document_type):
return value
return self.document_type.to_mongo(value, use_db_field,
fields=fields)
def validate(self, value, clean=True):
"""Make sure that the document instance is an instance of the
EmbeddedDocument subclass provided when the document was defined.
"""
# Using isinstance also works for subclasses of self.document
if not isinstance(value, self.document_type):
self.error('Invalid embedded document instance provided to an '
'EmbeddedDocumentField')
self.document_type.validate(value, clean)
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
def prepare_query_value(self, op, value):
if not isinstance(value, self.document_type):
value = self.document_type._from_son(value)
super(EmbeddedDocumentField, self).prepare_query_value(op, value)
return self.to_mongo(value)
class GenericEmbeddedDocumentField(BaseField):
"""A generic embedded document field - allows any
:class:`~mongoengine.EmbeddedDocument` to be stored.
Only valid values are subclasses of :class:`~mongoengine.EmbeddedDocument`.
.. note ::
You can use the choices param to limit the acceptable
EmbeddedDocument types
"""
def prepare_query_value(self, op, value):
return super(GenericEmbeddedDocumentField, self).prepare_query_value(op, self.to_mongo(value))
def to_python(self, value):
if isinstance(value, dict):
doc_cls = get_document(value['_cls'])
value = doc_cls._from_son(value)
return value
def validate(self, value, clean=True):
if not isinstance(value, EmbeddedDocument):
            self.error('Invalid embedded document instance provided to a '
'GenericEmbeddedDocumentField')
value.validate(clean=clean)
def to_mongo(self, document, use_db_field=True):
if document is None:
return None
data = document.to_mongo(use_db_field)
if '_cls' not in data:
data['_cls'] = document._class_name
return data
class DynamicField(BaseField):
"""A truly dynamic field type capable of handling different and varying
types of data.
Used by :class:`~mongoengine.DynamicDocument` to handle dynamic data"""
def to_mongo(self, value):
"""Convert a Python type to a MongoDB compatible type.
"""
if isinstance(value, basestring):
return value
if hasattr(value, 'to_mongo'):
cls = value.__class__
val = value.to_mongo()
            # If it's a document that's not inherited, add _cls
if (isinstance(value, Document)):
val = {"_ref": value.to_dbref(), "_cls": cls.__name__}
if (isinstance(value, EmbeddedDocument)):
val['_cls'] = cls.__name__
return val
if not isinstance(value, (dict, list, tuple)):
return value
is_list = False
if not hasattr(value, 'items'):
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
data = {}
for k, v in value.iteritems():
data[k] = self.to_mongo(v)
value = data
if is_list: # Convert back to a list
value = [v for k, v in sorted(data.iteritems(), key=itemgetter(0))]
return value
def to_python(self, value):
if isinstance(value, dict) and '_cls' in value:
doc_cls = get_document(value['_cls'])
if '_ref' in value:
value = doc_cls._get_db().dereference(value['_ref'])
return doc_cls._from_son(value)
return super(DynamicField, self).to_python(value)
def lookup_member(self, member_name):
return member_name
def prepare_query_value(self, op, value):
if isinstance(value, basestring):
from mongoengine.fields import StringField
return StringField().prepare_query_value(op, value)
return super(DynamicField, self).prepare_query_value(op, self.to_mongo(value))
def validate(self, value, clean=True):
if hasattr(value, "validate"):
value.validate(clean=clean)
class ListField(ComplexBaseField):
"""A list field that wraps a standard field, allowing multiple instances
of the field to be used as a list in the database.
If using with ReferenceFields see: :ref:`one-to-many-with-listfields`
.. note::
Required means it cannot be empty - as the default for ListFields is []
"""
def __init__(self, field=None, **kwargs):
self.field = field
kwargs.setdefault('default', lambda: [])
super(ListField, self).__init__(**kwargs)
def validate(self, value):
"""Make sure that a list of valid fields is being used.
"""
if (not isinstance(value, (list, tuple, QuerySet)) or
isinstance(value, basestring)):
self.error('Only lists and tuples may be used in a list field')
super(ListField, self).validate(value)
def prepare_query_value(self, op, value):
if self.field:
if op in ('set', 'unset') and (not isinstance(value, basestring)
and not isinstance(value, BaseDocument)
and hasattr(value, '__iter__')):
return [self.field.prepare_query_value(op, v) for v in value]
return self.field.prepare_query_value(op, value)
return super(ListField, self).prepare_query_value(op, value)
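# Illustrative sketch (added; not part of the original module): a list of short
# tag strings; ListField defaults to an empty list. `Post` is a hypothetical class.
def _example_list_field():
    class Post(Document):
        tags = ListField(StringField(max_length=30))
    return Post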
class EmbeddedDocumentListField(ListField):
"""A :class:`~mongoengine.ListField` designed specially to hold a list of
embedded documents to provide additional query helpers.
.. note::
The only valid list values are subclasses of
:class:`~mongoengine.EmbeddedDocument`.
.. versionadded:: 0.9
"""
def __init__(self, document_type, *args, **kwargs):
"""
:param document_type: The type of
:class:`~mongoengine.EmbeddedDocument` the list will hold.
:param args: Arguments passed directly into the parent
:class:`~mongoengine.ListField`.
:param kwargs: Keyword arguments passed directly into the parent
:class:`~mongoengine.ListField`.
"""
super(EmbeddedDocumentListField, self).__init__(
field=EmbeddedDocumentField(document_type), **kwargs
)
class SortedListField(ListField):
"""A ListField that sorts the contents of its list before writing to
the database in order to ensure that a sorted list is always
retrieved.
.. warning::
There is a potential race condition when handling lists. If you set /
save the whole list then other processes trying to save the whole list
as well could overwrite changes. The safest way to append to a list is
to perform a push operation.
.. versionadded:: 0.4
.. versionchanged:: 0.6 - added reverse keyword
"""
_ordering = None
_order_reverse = False
def __init__(self, field, **kwargs):
if 'ordering' in kwargs.keys():
self._ordering = kwargs.pop('ordering')
if 'reverse' in kwargs.keys():
self._order_reverse = kwargs.pop('reverse')
super(SortedListField, self).__init__(field, **kwargs)
def to_mongo(self, value):
value = super(SortedListField, self).to_mongo(value)
if self._ordering is not None:
return sorted(value, key=itemgetter(self._ordering),
reverse=self._order_reverse)
return sorted(value, reverse=self._order_reverse)
def key_not_string(d):
""" Helper function to recursively determine if any key in a dictionary is
not a string.
"""
for k, v in d.items():
if not isinstance(k, basestring) or (isinstance(v, dict) and key_not_string(v)):
return True
def key_has_dot_or_dollar(d):
""" Helper function to recursively determine if any key in a dictionary
contains a dot or a dollar sign.
"""
for k, v in d.items():
if ('.' in k or '$' in k) or (isinstance(v, dict) and key_has_dot_or_dollar(v)):
return True
class DictField(ComplexBaseField):
"""A dictionary field that wraps a standard Python dictionary. This is
similar to an embedded document, but the structure is not defined.
.. note::
Required means it cannot be empty - as the default for DictFields is {}
.. versionadded:: 0.3
.. versionchanged:: 0.5 - Can now handle complex / varying types of data
"""
def __init__(self, basecls=None, field=None, *args, **kwargs):
self.field = field
self.basecls = basecls or BaseField
if not issubclass(self.basecls, BaseField):
self.error('DictField only accepts dict values')
kwargs.setdefault('default', lambda: {})
super(DictField, self).__init__(*args, **kwargs)
def validate(self, value):
"""Make sure that a list of valid fields is being used.
"""
if not isinstance(value, dict):
self.error('Only dictionaries may be used in a DictField')
if key_not_string(value):
msg = ("Invalid dictionary key - documents must "
"have only string keys")
self.error(msg)
if key_has_dot_or_dollar(value):
self.error('Invalid dictionary key name - keys may not contain "."'
' or "$" characters')
super(DictField, self).validate(value)
def lookup_member(self, member_name):
return DictField(basecls=self.basecls, db_field=member_name)
def prepare_query_value(self, op, value):
match_operators = ['contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith',
'exact', 'iexact']
if op in match_operators and isinstance(value, basestring):
return StringField().prepare_query_value(op, value)
if hasattr(self.field, 'field'):
if op in ('set', 'unset') and isinstance(value, dict):
return dict(
(k, self.field.prepare_query_value(op, v))
for k, v in value.items())
return self.field.prepare_query_value(op, value)
return super(DictField, self).prepare_query_value(op, value)
class MapField(DictField):
"""A field that maps a name to a specified field type. Similar to
a DictField, except the 'value' of each item must match the specified
field type.
.. versionadded:: 0.5
"""
def __init__(self, field=None, *args, **kwargs):
if not isinstance(field, BaseField):
self.error('Argument to MapField constructor must be a valid '
'field')
super(MapField, self).__init__(field=field, *args, **kwargs)
class ReferenceField(BaseField):
"""A reference to a document that will be automatically dereferenced on
access (lazily).
Use the `reverse_delete_rule` to handle what should happen if the document
the field is referencing is deleted. EmbeddedDocuments, DictFields and
    MapFields do not support reverse_delete_rule, and an `InvalidDocumentError`
will be raised if trying to set on one of these Document / Field types.
The options are:
* DO_NOTHING - don't do anything (default).
* NULLIFY - Updates the reference to null.
* CASCADE - Deletes the documents associated with the reference.
* DENY - Prevent the deletion of the reference object.
* PULL - Pull the reference from a :class:`~mongoengine.fields.ListField`
of references
Alternative syntax for registering delete rules (useful when implementing
bi-directional delete rules)
.. code-block:: python
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
.. note ::
`reverse_delete_rule` does not trigger pre / post delete signals to be
triggered.
.. versionchanged:: 0.5 added `reverse_delete_rule`
"""
def __init__(self, document_type, dbref=False,
reverse_delete_rule=DO_NOTHING, **kwargs):
"""Initialises the Reference Field.
:param dbref: Store the reference as :class:`~pymongo.dbref.DBRef`
or as the :class:`~pymongo.objectid.ObjectId`.id .
:param reverse_delete_rule: Determines what to do when the referring
object is deleted
"""
if not isinstance(document_type, basestring):
if not issubclass(document_type, (Document, basestring)):
self.error('Argument to ReferenceField constructor must be a '
'document class or a string')
self.dbref = dbref
self.document_type_obj = document_type
self.reverse_delete_rule = reverse_delete_rule
super(ReferenceField, self).__init__(**kwargs)
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def __get__(self, instance, owner):
"""Descriptor to allow lazy dereferencing.
"""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
value = instance._data.get(self.name)
self._auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if self._auto_dereference and isinstance(value, DBRef):
value = self.document_type._get_db().dereference(value)
if value is not None:
instance._data[self.name] = self.document_type._from_son(value)
return super(ReferenceField, self).__get__(instance, owner)
def to_mongo(self, document):
if isinstance(document, DBRef):
if not self.dbref:
return document.id
return document
id_field_name = self.document_type._meta['id_field']
id_field = self.document_type._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
if id_ is None:
self.error('You can only reference documents once they have'
' been saved to the database')
else:
id_ = document
id_ = id_field.to_mongo(id_)
if self.dbref:
collection = self.document_type._get_collection_name()
return DBRef(collection, id_)
return id_
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type.
"""
if (not self.dbref and
not isinstance(value, (DBRef, Document, EmbeddedDocument))):
collection = self.document_type._get_collection_name()
value = DBRef(collection, self.document_type.id.to_python(value))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
super(ReferenceField, self).prepare_query_value(op, value)
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, (self.document_type, DBRef)):
self.error("A ReferenceField only accepts DBRef or documents")
if isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been '
'saved to the database')
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
class CachedReferenceField(BaseField):
"""
    A reference field with cached fields, intended to support pseudo-joins.
.. versionadded:: 0.9
"""
def __init__(self, document_type, fields=[], auto_sync=True, **kwargs):
"""Initialises the Cached Reference Field.
:param fields: A list of fields to be cached in document
:param auto_sync: if True documents are auto updated.
"""
if not isinstance(document_type, basestring) and \
not issubclass(document_type, (Document, basestring)):
self.error('Argument to CachedReferenceField constructor must be a'
' document class or a string')
self.auto_sync = auto_sync
self.document_type_obj = document_type
self.fields = fields
super(CachedReferenceField, self).__init__(**kwargs)
def start_listener(self):
from mongoengine import signals
signals.post_save.connect(self.on_document_pre_save,
sender=self.document_type)
def on_document_pre_save(self, sender, document, created, **kwargs):
if not created:
update_kwargs = dict(
('set__%s__%s' % (self.name, k), v)
for k, v in document._delta()[0].items()
if k in self.fields)
if update_kwargs:
filter_kwargs = {}
filter_kwargs[self.name] = document
self.owner_document.objects(
**filter_kwargs).update(**update_kwargs)
def to_python(self, value):
if isinstance(value, dict):
collection = self.document_type._get_collection_name()
value = DBRef(
collection, self.document_type.id.to_python(value['_id']))
return value
@property
def document_type(self):
if isinstance(self.document_type_obj, basestring):
if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
self.document_type_obj = self.owner_document
else:
self.document_type_obj = get_document(self.document_type_obj)
return self.document_type_obj
def __get__(self, instance, owner):
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available
value = instance._data.get(self.name)
self._auto_dereference = instance._fields[self.name]._auto_dereference
# Dereference DBRefs
if self._auto_dereference and isinstance(value, DBRef):
value = self.document_type._get_db().dereference(value)
if value is not None:
instance._data[self.name] = self.document_type._from_son(value)
return super(CachedReferenceField, self).__get__(instance, owner)
def to_mongo(self, document):
id_field_name = self.document_type._meta['id_field']
id_field = self.document_type._fields[id_field_name]
        doc_type = self.document_type
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.pk
if id_ is None:
self.error('You can only reference documents once they have'
' been saved to the database')
else:
self.error('Only accept a document object')
value = SON((
("_id", id_field.to_mongo(id_)),
))
value.update(dict(document.to_mongo(fields=self.fields)))
return value
def prepare_query_value(self, op, value):
if value is None:
return None
if isinstance(value, Document):
if value.pk is None:
self.error('You can only reference documents once they have'
' been saved to the database')
return {'_id': value.pk}
raise NotImplementedError
def validate(self, value):
if not isinstance(value, (self.document_type)):
self.error("A CachedReferenceField only accepts documents")
if isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been '
'saved to the database')
def lookup_member(self, member_name):
return self.document_type._fields.get(member_name)
def sync_all(self):
"""
Sync all cached fields on demand.
Caution: this operation may be slower.
"""
update_key = 'set__%s' % self.name
for doc in self.document_type.objects:
filter_kwargs = {}
filter_kwargs[self.name] = doc
update_kwargs = {}
update_kwargs[update_key] = doc
self.owner_document.objects(
**filter_kwargs).update(**update_kwargs)
class GenericReferenceField(BaseField):
"""A reference to *any* :class:`~mongoengine.document.Document` subclass
that will be automatically dereferenced on access (lazily).
.. note ::
* Any documents used as a generic reference must be registered in the
document registry. Importing the model will automatically register
it.
* You can use the choices param to limit the acceptable Document types
.. versionadded:: 0.3
"""
def __get__(self, instance, owner):
if instance is None:
return self
value = instance._data.get(self.name)
self._auto_dereference = instance._fields[self.name]._auto_dereference
if self._auto_dereference and isinstance(value, (dict, SON)):
instance._data[self.name] = self.dereference(value)
return super(GenericReferenceField, self).__get__(instance, owner)
def validate(self, value):
if not isinstance(value, (Document, DBRef, dict, SON)):
self.error('GenericReferences can only contain documents')
if isinstance(value, (dict, SON)):
if '_ref' not in value or '_cls' not in value:
self.error('GenericReferences can only contain documents')
# We need the id from the saved object to create the DBRef
elif isinstance(value, Document) and value.id is None:
self.error('You can only reference documents once they have been'
' saved to the database')
def dereference(self, value):
doc_cls = get_document(value['_cls'])
reference = value['_ref']
doc = doc_cls._get_db().dereference(reference)
if doc is not None:
doc = doc_cls._from_son(doc)
return doc
def to_mongo(self, document, use_db_field=True):
if document is None:
return None
if isinstance(document, (dict, SON)):
return document
id_field_name = document.__class__._meta['id_field']
id_field = document.__class__._fields[id_field_name]
if isinstance(document, Document):
# We need the id from the saved object to create the DBRef
id_ = document.id
if id_ is None:
self.error('You can only reference documents once they have'
' been saved to the database')
else:
id_ = document
id_ = id_field.to_mongo(id_)
collection = document._get_collection_name()
ref = DBRef(collection, id_)
return SON((
('_cls', document._class_name),
('_ref', ref)
))
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
class BinaryField(BaseField):
"""A binary data field.
"""
def __init__(self, max_bytes=None, **kwargs):
self.max_bytes = max_bytes
super(BinaryField, self).__init__(**kwargs)
def __set__(self, instance, value):
"""Handle bytearrays in python 3.1"""
if PY3 and isinstance(value, bytearray):
value = bin_type(value)
return super(BinaryField, self).__set__(instance, value)
def to_mongo(self, value):
return Binary(value)
def validate(self, value):
if not isinstance(value, (bin_type, txt_type, Binary)):
self.error("BinaryField only accepts instances of "
"(%s, %s, Binary)" % (
bin_type.__name__, txt_type.__name__))
if self.max_bytes is not None and len(value) > self.max_bytes:
self.error('Binary value is too long')
class GridFSError(Exception):
pass
class GridFSProxy(object):
"""Proxy object to handle writing and reading of files to and from GridFS
.. versionadded:: 0.4
.. versionchanged:: 0.5 - added optional size param to read
.. versionchanged:: 0.6 - added collection name param
"""
_fs = None
def __init__(self, grid_id=None, key=None,
instance=None,
db_alias=DEFAULT_CONNECTION_NAME,
collection_name='fs'):
self.grid_id = grid_id # Store GridFS id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if hasattr(obj, name):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __nonzero__(self):
return bool(self.grid_id)
def __getstate__(self):
self_dict = self.__dict__
self_dict['_fs'] = None
return self_dict
def __copy__(self):
copied = GridFSProxy()
copied.__dict__.update(self.__getstate__())
return copied
def __deepcopy__(self, memo):
return self.__copy__()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def __str__(self):
name = getattr(
self.get(), 'filename', self.grid_id) if self.get() else '(no file)'
return '<%s: %s>' % (self.__class__.__name__, name)
def __eq__(self, other):
if isinstance(other, GridFSProxy):
return ((self.grid_id == other.grid_id) and
(self.collection_name == other.collection_name) and
(self.db_alias == other.db_alias))
else:
return False
@property
def fs(self):
if not self._fs:
self._fs = gridfs.GridFS(
get_db(self.db_alias), self.collection_name)
return self._fs
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = self.fs.get(self.grid_id)
return self.gridout
except:
# File has been deleted
return None
def new_file(self, **kwargs):
self.newfile = self.fs.new_file(**kwargs)
self.grid_id = self.newfile._id
self._mark_as_changed()
def put(self, file_obj, **kwargs):
if self.grid_id:
raise GridFSError('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = self.fs.put(file_obj, **kwargs)
self._mark_as_changed()
def write(self, string):
if self.grid_id:
if not self.newfile:
raise GridFSError('This document already has a file. Either '
'delete it or call replace to overwrite it')
else:
self.new_file()
self.newfile.write(string)
def writelines(self, lines):
if not self.newfile:
self.new_file()
self.grid_id = self.newfile._id
self.newfile.writelines(lines)
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def delete(self):
# Delete file from GridFS, FileField still remains
self.fs.delete(self.grid_id)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def replace(self, file_obj, **kwargs):
self.delete()
self.put(file_obj, **kwargs)
def close(self):
if self.newfile:
self.newfile.close()
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class FileField(BaseField):
"""A GridFS storage field.
.. versionadded:: 0.4
.. versionchanged:: 0.5 added optional size param for read
.. versionchanged:: 0.6 added db_alias for multidb support
"""
proxy_class = GridFSProxy
def __init__(self,
db_alias=DEFAULT_CONNECTION_NAME,
collection_name="fs", **kwargs):
super(FileField, self).__init__(**kwargs)
self.collection_name = collection_name
self.db_alias = db_alias
def __get__(self, instance, owner):
if instance is None:
return self
# Check if a file already exists for this model
grid_file = instance._data.get(self.name)
if not isinstance(grid_file, self.proxy_class):
grid_file = self.get_proxy_obj(key=self.name, instance=instance)
instance._data[self.name] = grid_file
if not grid_file.key:
grid_file.key = self.name
grid_file.instance = instance
return grid_file
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, GridFSProxy)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new proxy object as we don't already have one
instance._data[key] = self.get_proxy_obj(
key=key, instance=instance)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def get_proxy_obj(self, key, instance, db_alias=None, collection_name=None):
if db_alias is None:
db_alias = self.db_alias
if collection_name is None:
collection_name = self.collection_name
return self.proxy_class(key=key, instance=instance,
db_alias=db_alias,
collection_name=collection_name)
def to_mongo(self, value):
# Store the GridFS file id in MongoDB
if isinstance(value, self.proxy_class) and value.grid_id is not None:
return value.grid_id
return None
def to_python(self, value):
if value is not None:
return self.proxy_class(value,
collection_name=self.collection_name,
db_alias=self.db_alias)
def validate(self, value):
if value.grid_id is not None:
if not isinstance(value, self.proxy_class):
self.error('FileField only accepts GridFSProxy values')
if not isinstance(value.grid_id, ObjectId):
self.error('Invalid GridFSProxy value')
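# Illustrative sketch (added; not part of the original module): attaching a file
# to a document through the GridFS proxy and reading it back. `Report` and
# 'report.pdf' are hypothetical, and a default connection must be registered.
def _example_file_field():
    class Report(Document):
        attachment = FileField()
    report = Report()
    with open('report.pdf', 'rb') as fh:
        report.attachment.put(fh, content_type='application/pdf')
    report.save()
    return report.attachment.read()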
class ImageGridFsProxy(GridFSProxy):
"""
Proxy for ImageField
versionadded: 0.6
"""
def put(self, file_obj, **kwargs):
"""
        Insert an image into the database,
        applying field properties (size, thumbnail_size).
"""
field = self.instance._fields[self.key]
# Handle nested fields
if hasattr(field, 'field') and isinstance(field.field, FileField):
field = field.field
try:
img = Image.open(file_obj)
img_format = img.format
except Exception, e:
raise ValidationError('Invalid image: %s' % e)
# Progressive JPEG
progressive = img.info.get('progressive') or False
if (kwargs.get('progressive') and
isinstance(kwargs.get('progressive'), bool) and
img_format == 'JPEG'):
progressive = True
else:
progressive = False
if (field.size and (img.size[0] > field.size['width'] or
img.size[1] > field.size['height'])):
size = field.size
if size['force']:
img = ImageOps.fit(img,
(size['width'],
size['height']),
Image.ANTIALIAS)
else:
img.thumbnail((size['width'],
size['height']),
Image.ANTIALIAS)
thumbnail = None
if field.thumbnail_size:
size = field.thumbnail_size
if size['force']:
thumbnail = ImageOps.fit(
img, (size['width'], size['height']), Image.ANTIALIAS)
else:
thumbnail = img.copy()
thumbnail.thumbnail((size['width'],
size['height']),
Image.ANTIALIAS)
if thumbnail:
thumb_id = self._put_thumbnail(thumbnail, img_format, progressive)
else:
thumb_id = None
w, h = img.size
io = StringIO()
img.save(io, img_format, progressive=progressive)
io.seek(0)
return super(ImageGridFsProxy, self).put(io,
width=w,
height=h,
format=img_format,
thumbnail_id=thumb_id,
**kwargs)
def delete(self, *args, **kwargs):
# deletes thumbnail
out = self.get()
if out and out.thumbnail_id:
self.fs.delete(out.thumbnail_id)
return super(ImageGridFsProxy, self).delete(*args, **kwargs)
def _put_thumbnail(self, thumbnail, format, progressive, **kwargs):
w, h = thumbnail.size
io = StringIO()
thumbnail.save(io, format, progressive=progressive)
io.seek(0)
return self.fs.put(io, width=w,
height=h,
format=format,
**kwargs)
@property
def size(self):
"""
        Return the width and height of the image.
"""
out = self.get()
if out:
return out.width, out.height
@property
def format(self):
"""
        Return the format of the image,
        e.g. PNG, JPEG, GIF, etc.
"""
out = self.get()
if out:
return out.format
@property
def thumbnail(self):
"""
        Return a gridfs.grid_file.GridOut
        representing a thumbnail of the image.
"""
out = self.get()
if out and out.thumbnail_id:
return self.fs.get(out.thumbnail_id)
def write(self, *args, **kwargs):
raise RuntimeError("Please use \"put\" method instead")
def writelines(self, *args, **kwargs):
raise RuntimeError("Please use \"put\" method instead")
class ImproperlyConfigured(Exception):
pass
class ImageField(FileField):
"""
    An image file storage field.
    @size (width, height, force):
        maximum size at which to store images; larger images are
        automatically resized
        ex: size=(800, 600, True)
    @thumbnail_size (width, height, force):
        size of the generated thumbnail
    .. versionadded:: 0.6
"""
proxy_class = ImageGridFsProxy
def __init__(self, size=None, thumbnail_size=None,
collection_name='images', **kwargs):
if not Image:
raise ImproperlyConfigured("PIL library was not found")
params_size = ('width', 'height', 'force')
extra_args = dict(size=size, thumbnail_size=thumbnail_size)
for att_name, att in extra_args.items():
value = None
if isinstance(att, (tuple, list)):
if PY3:
value = dict(itertools.zip_longest(params_size, att,
fillvalue=None))
else:
value = dict(map(None, params_size, att))
setattr(self, att_name, value)
super(ImageField, self).__init__(
collection_name=collection_name,
**kwargs)
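# --- Hedged usage sketch (not part of this module) ---------------------------
# Shows the (width, height, force) tuples accepted above and the extra
# properties exposed by ImageGridFsProxy; Profile/avatar.png and the connect()
# call are hypothetical.
#
#   from mongoengine import Document, ImageField, connect
#
#   class Profile(Document):
#       avatar = ImageField(size=(800, 600, True), thumbnail_size=(64, 64, True))
#
#   connect('people')
#   p = Profile()
#   with open('avatar.png', 'rb') as fd:
#       p.avatar.put(fd)           # resized if larger than 800x600; 64x64 thumbnail stored
#   p.save()
#   print(p.avatar.size)           # (width, height) of the stored image
#   print(p.avatar.format)         # e.g. 'PNG'
#   thumb = p.avatar.thumbnail     # gridfs.grid_file.GridOut of the thumbnail
# ------------------------------------------------------------------------------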
class SequenceField(BaseField):
"""Provides a sequential counter see:
http://www.mongodb.org/display/DOCS/Object+IDs#ObjectIDs-SequenceNumbers
.. note::
        Although traditional databases often use increasing sequence
        numbers for primary keys, in MongoDB the preferred approach is to
        use Object IDs instead. The concept is that in a very large
        cluster of machines it is easier to create an object ID than to
        maintain a global, uniformly increasing sequence number.
Use any callable as `value_decorator` to transform calculated counter into
any value suitable for your needs, e.g. string or hexadecimal
representation of the default integer counter value.
.. versionadded:: 0.5
.. versionchanged:: 0.8 added `value_decorator`
"""
_auto_gen = True
COLLECTION_NAME = 'mongoengine.counters'
VALUE_DECORATOR = int
def __init__(self, collection_name=None, db_alias=None, sequence_name=None,
value_decorator=None, *args, **kwargs):
self.collection_name = collection_name or self.COLLECTION_NAME
self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
self.sequence_name = sequence_name
self.value_decorator = (callable(value_decorator) and
value_decorator or self.VALUE_DECORATOR)
return super(SequenceField, self).__init__(*args, **kwargs)
def generate(self):
"""
Generate and Increment the counter
"""
sequence_name = self.get_sequence_name()
sequence_id = "%s.%s" % (sequence_name, self.name)
collection = get_db(alias=self.db_alias)[self.collection_name]
counter = collection.find_and_modify(query={"_id": sequence_id},
update={"$inc": {"next": 1}},
new=True,
upsert=True)
return self.value_decorator(counter['next'])
def set_next_value(self, value):
"""Helper method to set the next sequence value"""
sequence_name = self.get_sequence_name()
sequence_id = "%s.%s" % (sequence_name, self.name)
collection = get_db(alias=self.db_alias)[self.collection_name]
counter = collection.find_and_modify(query={"_id": sequence_id},
update={"$set": {"next": value}},
new=True,
upsert=True)
return self.value_decorator(counter['next'])
def get_next_value(self):
"""Helper method to get the next value for previewing.
.. warning:: There is no guarantee this will be the next value
as it is only fixed on set.
"""
sequence_name = self.get_sequence_name()
sequence_id = "%s.%s" % (sequence_name, self.name)
collection = get_db(alias=self.db_alias)[self.collection_name]
data = collection.find_one({"_id": sequence_id})
if data:
return self.value_decorator(data['next'] + 1)
return self.value_decorator(1)
def get_sequence_name(self):
if self.sequence_name:
return self.sequence_name
owner = self.owner_document
if issubclass(owner, Document):
return owner._get_collection_name()
else:
return ''.join('_%s' % c if c.isupper() else c
for c in owner._class_name).strip('_').lower()
def __get__(self, instance, owner):
value = super(SequenceField, self).__get__(instance, owner)
if value is None and instance._initialised:
value = self.generate()
instance._data[self.name] = value
instance._mark_as_changed(self.name)
return value
def __set__(self, instance, value):
if value is None and instance._initialised:
value = self.generate()
return super(SequenceField, self).__set__(instance, value)
def prepare_query_value(self, op, value):
"""
        This method is overridden in order to convert the query value to the
        required type, so that query values passed as strings can be compared
        successfully; the base implementation returns the value as is.
"""
return self.value_decorator(value)
def to_python(self, value):
if value is None:
value = self.generate()
return value
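# --- Hedged usage sketch (not part of this module) ---------------------------
# Demonstrates the counter document kept in 'mongoengine.counters' and the
# value_decorator hook described in the docstring above; Ticket and the
# connect() call are hypothetical.
#
#   from mongoengine import Document, SequenceField, connect
#
#   class Ticket(Document):
#       number = SequenceField(value_decorator=lambda n: 'T%05d' % n)
#
#   connect('helpdesk')
#   print(Ticket().number)   # 'T00001'  (counter _id is 'ticket.number')
#   print(Ticket().number)   # 'T00002'
# ------------------------------------------------------------------------------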
class UUIDField(BaseField):
"""A UUID field.
.. versionadded:: 0.6
"""
_binary = None
def __init__(self, binary=True, **kwargs):
"""
Store UUID data in the database
:param binary: if False store as a string.
.. versionchanged:: 0.8.0
.. versionchanged:: 0.6.19
"""
self._binary = binary
super(UUIDField, self).__init__(**kwargs)
def to_python(self, value):
if not self._binary:
original_value = value
try:
if not isinstance(value, basestring):
value = unicode(value)
return uuid.UUID(value)
except:
return original_value
return value
def to_mongo(self, value):
if not self._binary:
return unicode(value)
elif isinstance(value, basestring):
return uuid.UUID(value)
return value
def prepare_query_value(self, op, value):
if value is None:
return None
return self.to_mongo(value)
def validate(self, value):
if not isinstance(value, uuid.UUID):
if not isinstance(value, basestring):
value = str(value)
try:
value = uuid.UUID(value)
            except Exception as exc:
self.error('Could not convert to UUID: %s' % exc)
class GeoPointField(BaseField):
"""A list storing a longitude and latitude coordinate.
.. note:: this represents a generic point in a 2D plane and a legacy way of
representing a geo point. It admits 2d indexes but not "2dsphere" indexes
in MongoDB > 2.4 which are more natural for modeling geospatial points.
See :ref:`geospatial-indexes`
.. versionadded:: 0.4
"""
_geo_index = pymongo.GEO2D
def validate(self, value):
"""Make sure that a geo-value is of type (x, y)
"""
if not isinstance(value, (list, tuple)):
self.error('GeoPointField can only accept tuples or lists '
'of (x, y)')
if not len(value) == 2:
self.error("Value (%s) must be a two-dimensional point" %
repr(value))
elif (not isinstance(value[0], (float, int)) or
not isinstance(value[1], (float, int))):
self.error(
"Both values (%s) in point must be float or int" % repr(value))
class PointField(GeoJsonBaseField):
"""A GeoJSON field storing a longitude and latitude coordinate.
The data is represented as:
.. code-block:: js
{ "type" : "Point" ,
"coordinates" : [x, y]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.4
.. versionadded:: 0.8
"""
_type = "Point"
class LineStringField(GeoJsonBaseField):
"""A GeoJSON field storing a line of longitude and latitude coordinates.
The data is represented as:
.. code-block:: js
{ "type" : "LineString" ,
"coordinates" : [[x1, y1], [x1, y1] ... [xn, yn]]}
You can either pass a dict with the full information or a list of points.
Requires mongodb >= 2.4
.. versionadded:: 0.8
"""
_type = "LineString"
class PolygonField(GeoJsonBaseField):
"""A GeoJSON field storing a polygon of longitude and latitude coordinates.
The data is represented as:
.. code-block:: js
{ "type" : "Polygon" ,
"coordinates" : [[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]}
You can either pass a dict with the full information or a list
of LineStrings. The first LineString being the outside and the rest being
holes.
Requires mongodb >= 2.4
.. versionadded:: 0.8
"""
_type = "Polygon"
class MultiPointField(GeoJsonBaseField):
"""A GeoJSON field storing a list of Points.
The data is represented as:
.. code-block:: js
{ "type" : "MultiPoint" ,
"coordinates" : [[x1, y1], [x2, y2]]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.6
.. versionadded:: 0.9
"""
_type = "MultiPoint"
class MultiLineStringField(GeoJsonBaseField):
"""A GeoJSON field storing a list of LineStrings.
The data is represented as:
.. code-block:: js
{ "type" : "MultiLineString" ,
"coordinates" : [[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]]}
You can either pass a dict with the full information or a list of points.
Requires mongodb >= 2.6
.. versionadded:: 0.9
"""
_type = "MultiLineString"
class MultiPolygonField(GeoJsonBaseField):
"""A GeoJSON field storing list of Polygons.
The data is represented as:
.. code-block:: js
{ "type" : "MultiPolygon" ,
"coordinates" : [[
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
], [
[[x1, y1], [x1, y1] ... [xn, yn]],
[[x1, y1], [x1, y1] ... [xn, yn]]
]
}
You can either pass a dict with the full information or a list
of Polygons.
Requires mongodb >= 2.6
.. versionadded:: 0.9
"""
_type = "MultiPolygon"
|
starsirius/mongoengine
|
mongoengine/fields.py
|
Python
|
mit
| 66,293
|
import os
import pickle
from studio import fs_tracker
def clientFunction(args, files):
print('client function call with args ' +
str(args) + ' and files ' + str(files))
modelfile = 'model.dat'
filename = files.get('model') or \
os.path.join(fs_tracker.get_artifact('modeldir'), modelfile)
print("Trying to load file {}".format(filename))
if os.path.exists(filename):
with open(filename, 'rb') as f:
args = pickle.loads(f.read()) + 1
else:
print("Trying to write file {}".format(filename))
with open(filename, 'wb') as f:
f.write(pickle.dumps(args, protocol=2))
return args
if __name__ == "__main__":
clientFunction('test', {})
|
studioml/studio
|
studio/completion_service/completion_service_testfunc_saveload.py
|
Python
|
apache-2.0
| 734
|
#!/usr/bin/env python
"""
Util to count which clients are most used.
Example usage:
utils/source.py tweets.jsonl > sources.html
"""
import json
import fileinput
from collections import defaultdict
summary = defaultdict(int)
for line in fileinput.input():
tweet = json.loads(line)
source = tweet["source"]
summary[source] += 1
sumsort = sorted(summary, key=summary.get, reverse=True)
print(
"""<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Twitter client sources</title>
<style>
body {
font-family: Arial, Helvetica, sans-serif;
font-size: 12pt;
margin-left: auto;
margin-right: auto;
width: 95%;
}
footer#page {
margin-top: 15px;
clear: both;
width: 100%;
text-align: center;
font-size: 20pt;
font-weight: heavy;
}
header {
text-align: center;
margin-bottom: 20px;
}
</style>
</head>
<body>
<header>
<h1>Twitter client sources</h1>
<em>created on the command line with <a href="https://github.com/DocNow/twarc">twarc</a></em>
</header>
<table>
"""
)
for source in sumsort:
print("<tr><td>{}</td><td>{}</td></tr>".format(source, summary[source]))
print(
"""
</table>
<footer id="page">
<hr>
<br>
created on the command line with <a href="https://github.com/DocNow/twarc">twarc</a>.
<br>
<br>
</footer>
</body>
</html>"""
)
# End of file
|
DocNow/twarc
|
utils/source.py
|
Python
|
mit
| 1,412
|
#!/usr/bin/env python
"""
Django administration utility.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "credentials.settings.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
edx/credentials
|
manage.py
|
Python
|
agpl-3.0
| 301
|
# -*- coding: utf-8 -*-
# Copyright © 2016 Manuel Kaufmann
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
name = "meta_template"
def set_site(self, site):
self.site = site
MetaTemplate.site = site
return super(Plugin, self).set_site(site)
class MetaTemplate(Directive):
""" Restructured text extension for inserting custom templates."""
option_spec = {
'title': directives.unchanged,
'href': directives.unchanged,
'url': directives.unchanged,
'target': directives.unchanged,
'src': directives.unchanged,
'style': directives.unchanged,
}
has_content = True
required_arguments = 1
optional_arguments = 0
def __init__(self, *args, **kwargs):
super(MetaTemplate, self).__init__(*args, **kwargs)
def run(self):
template_name = self.arguments[0] + '.tmpl'
self.options.update({
'content': self.content,
})
output = self.site.template_system.render_template(
template_name,
None,
self.options,
)
return [nodes.raw('', output, format='html')]
directives.register_directive('template', MetaTemplate)
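# --- Hedged usage sketch (not part of this plugin) ----------------------------
# How the directive registered above might be invoked from a reStructuredText
# post; the template name 'promo' and the option values are hypothetical, and a
# promo.tmpl must exist in the site's template system.
#
#   .. template:: promo
#      :title: Read the docs
#      :href: https://getnikola.com
#
#      Body lines are passed to the template as ``content``.
# ------------------------------------------------------------------------------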
|
getnikola/plugins
|
v7/meta_template/meta_template.py
|
Python
|
mit
| 2,442
|
"""
This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
handler of Python 3.
Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
"""
# This code is released under the Python license and the BSD 2-clause license
import codecs
import sys
from future import utils
FS_ERRORS = 'surrogateescape'
# # -- Python 2/3 compatibility -------------------------------------
# FS_ERRORS = 'my_surrogateescape'
def u(text):
if utils.PY3:
return text
else:
return text.decode('unicode_escape')
def b(data):
if utils.PY3:
return data.encode('latin1')
else:
return data
if utils.PY3:
_unichr = chr
bytes_chr = lambda code: bytes((code,))
else:
_unichr = unichr
bytes_chr = chr
def surrogateescape_handler(exc):
"""
Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode
character U+DCxx on decoding, and these are translated into the
original bytes on encoding.
"""
mystring = exc.object[exc.start:exc.end]
try:
if isinstance(exc, UnicodeDecodeError):
# mystring is a byte-string in this case
decoded = replace_surrogate_decode(mystring)
elif isinstance(exc, UnicodeEncodeError):
# In the case of u'\udcc3'.encode('ascii',
# 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an
# exception anyway after this function is called, even though I think
# it's doing what it should. It seems that the strict encoder is called
# to encode the unicode string that this function returns ...
decoded = replace_surrogate_encode(mystring)
else:
raise exc
except NotASurrogateError:
raise exc
return (decoded, exc.end)
class NotASurrogateError(Exception):
pass
def replace_surrogate_encode(mystring):
"""
Returns a (unicode) string, not the more logical bytes, because the codecs
register_error functionality expects this.
"""
decoded = []
for ch in mystring:
# if utils.PY3:
# code = ch
# else:
code = ord(ch)
# The following magic comes from Py3.3's Python/codecs.c file:
if not 0xD800 <= code <= 0xDCFF:
            # Not a surrogate; raise NotASurrogateError so the handler
            # re-raises the original exception.
            raise NotASurrogateError
# mybytes = [0xe0 | (code >> 12),
# 0x80 | ((code >> 6) & 0x3f),
# 0x80 | (code & 0x3f)]
# Is this a good idea?
if 0xDC00 <= code <= 0xDC7F:
decoded.append(_unichr(code - 0xDC00))
elif code <= 0xDCFF:
decoded.append(_unichr(code - 0xDC00))
else:
raise NotASurrogateError
return str().join(decoded)
def replace_surrogate_decode(mybytes):
"""
Returns a (unicode) string
"""
decoded = []
for ch in mybytes:
# We may be parsing newbytes (in which case ch is an int) or a native
# str on Py2
if isinstance(ch, int):
code = ch
else:
code = ord(ch)
if 0x80 <= code <= 0xFF:
decoded.append(_unichr(0xDC00 + code))
elif code <= 0x7F:
decoded.append(_unichr(code))
else:
# # It may be a bad byte
# # Try swallowing it.
# continue
# print("RAISE!")
raise NotASurrogateError
return str().join(decoded)
def encodefilename(fn):
if FS_ENCODING == 'ascii':
# ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error
# handler has to return bytes in 0x80-0xFF range.
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if code < 128:
ch = bytes_chr(code)
elif 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
else:
raise UnicodeEncodeError(FS_ENCODING,
fn, index, index + 1,
'ordinal not in range(128)')
encoded.append(ch)
return bytes().join(encoded)
elif FS_ENCODING == 'utf-8':
# UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF
# doesn't go through our error handler
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if 0xD800 <= code <= 0xDFFF:
if 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
encoded.append(ch)
else:
raise UnicodeEncodeError(
FS_ENCODING,
fn, index, index + 1, 'surrogates not allowed')
else:
ch_utf8 = ch.encode('utf-8')
encoded.append(ch_utf8)
return bytes().join(encoded)
else:
return fn.encode(FS_ENCODING, FS_ERRORS)
def decodefilename(fn):
return fn.decode(FS_ENCODING, FS_ERRORS)
FS_ENCODING = 'ascii'
fn = b('[abc\xff]')
encoded = u('[abc\udcff]')
# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]')
# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
# normalize the filesystem encoding name.
# For example, we expect "utf-8", not "UTF8".
FS_ENCODING = codecs.lookup(FS_ENCODING).name
def register_surrogateescape():
"""
Registers the surrogateescape error handler on Python 2 (only)
"""
if utils.PY3:
return
try:
codecs.lookup_error(FS_ERRORS)
except LookupError:
codecs.register_error(FS_ERRORS, surrogateescape_handler)
if __name__ == '__main__':
pass
# # Tests:
# register_surrogateescape()
# b = decodefilename(fn)
# assert b == encoded, "%r != %r" % (b, encoded)
# c = encodefilename(b)
# assert c == fn, '%r != %r' % (c, fn)
# # print("ok")
|
thonkify/thonkify
|
src/lib/future/utils/surrogateescape.py
|
Python
|
mit
| 6,133
|
#!/usr/bin/env python
from sys import argv
from argparse import ArgumentParser
from collections import OrderedDict
from shutil import which
from subprocess import check_output, PIPE
from yaml import load, dump
arg_rules = OrderedDict([
(("-f", "--freeze-conda"), {
"help": "Write Conda and Pip configs separately",
"action": "store_true"
})
])
def get_args(arg_rules=arg_rules):
parser = ArgumentParser()
for rule_unnamed, rule_named in arg_rules.items():
parser.add_argument(*rule_unnamed, **rule_named)
if len(argv) == 1:
exit(parser.print_help() or 1)
else:
return parser.parse_args()
def get_current_environment():
if which("conda"):
cmd = ["conda", "info", "--envs"]
raw_info = check_output(cmd, universal_newlines=True)
for line in raw_info.strip("\n").split("\n"):
fields = line.split()
if (len(fields) > 2) and (fields[1] == "*"):
return fields[0], fields[2]
return None, None
def get_conda_config(env_prefix):
cmd = ["conda", "env", "export"]
conda_yaml = check_output(cmd, universal_newlines=True, stderr=PIPE)
conda_config = load(conda_yaml)
if "prefix" in conda_config:
del conda_config["prefix"]
for i, dependency in enumerate(conda_config["dependencies"]):
if isinstance(dependency, dict):
if "pip" in dependency:
del dependency["pip"]
if not dependency:
del conda_config["dependencies"][i]
break
return conda_config
def get_pip_config(env_prefix):
pip_location = env_prefix + "/bin/pip"
if which(pip_location):
cmd = [pip_location, "freeze"]
return check_output(cmd, stderr=PIPE, universal_newlines=True)
else:
return ""
def freeze_conda(env_file="environment.yml", req_file="requirements.txt"):
env_name, env_prefix = get_current_environment()
if env_name and (env_name != "root"):
with open(env_file, "w") as environment_file:
conda_config = get_conda_config(env_prefix)
dump(conda_config, environment_file, default_flow_style=False)
with open(req_file, "w") as requirements_file:
freeze = get_pip_config(env_prefix)
requirements_file.write(freeze)
else:
raise OSError("Not inside a Conda environment")
def main():
args = get_args()
if args.freeze_conda:
freeze_conda()
return 0
exit(main())
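# --- Hedged usage sketch ------------------------------------------------------
# As this file is snakecharmer/__main__.py, the assumed invocation is:
#
#   python -m snakecharmer --freeze-conda
#
# which, inside an active non-root Conda environment, writes environment.yml
# (Conda packages with the pip section and prefix stripped) and
# requirements.txt (the environment's `pip freeze` output).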
|
LankyCyril/Snakeknot
|
snakecharmer/__main__.py
|
Python
|
mit
| 2,501
|
from __future__ import absolute_import
from sfepy.linalg import norm_l2_along_axis
from examples.quantum.quantum_common import common
def fun_v(ts, coor, mode=None, **kwargs):
if not mode == 'qp': return
out = {}
C = 0.5
val = C * norm_l2_along_axis(coor, axis=1, squared=True)
val.shape = (val.shape[0], 1, 1)
out['V'] = val
return out
def define(n_eigs=20, tau=0.0):
l = common(fun_v, n_eigs=n_eigs, tau=tau)
return l
|
lokik/sfepy
|
examples/quantum/oscillator.py
|
Python
|
bsd-3-clause
| 461
|
#!/usr/bin/env python
"""
Written by nickcooper-zhangtonghao
Github: https://github.com/nickcooper-zhangtonghao
Email: nickcooper-zhangtonghao@opencloud.tech
Note: Example code For testing purposes only
This code has been released under the terms of the Apache-2.0 license
http://opensource.org/licenses/Apache-2.0
"""
from pyVmomi import vim
from pyVmomi import vmodl
from tools import tasks
from pyVim.connect import SmartConnect, SmartConnectNoSSL, Disconnect
import atexit
import argparse
import getpass
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for talking to vCenter')
parser.add_argument('-s', '--host',
required=True,
action='store',
                        help='vSphere service to connect to')
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use')
parser.add_argument('--no-ssl',
action='store_true',
help='Skip client SSL verification')
parser.add_argument('-v', '--vm-name',
required=False,
action='store',
help='name of the vm')
parser.add_argument('--uuid',
required=False,
action='store',
help='vmuuid of vm')
parser.add_argument('--port-group',
required=True,
action='store',
help='port group to connect on')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password')
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def add_nic(si, vm, network_name):
"""
:param si: Service Instance
:param vm: Virtual Machine Object
:param network_name: Name of the Virtual Network
"""
spec = vim.vm.ConfigSpec()
nic_changes = []
nic_spec = vim.vm.device.VirtualDeviceSpec()
nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_spec.device = vim.vm.device.VirtualE1000()
nic_spec.device.deviceInfo = vim.Description()
nic_spec.device.deviceInfo.summary = 'vCenter API test'
content = si.RetrieveContent()
network = get_obj(content, [vim.Network], network_name)
if isinstance(network, vim.OpaqueNetwork):
nic_spec.device.backing = \
vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
nic_spec.device.backing.opaqueNetworkType = \
network.summary.opaqueNetworkType
nic_spec.device.backing.opaqueNetworkId = \
network.summary.opaqueNetworkId
else:
nic_spec.device.backing = \
vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_spec.device.backing.useAutoDetect = False
        nic_spec.device.backing.deviceName = network_name
nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic_spec.device.connectable.startConnected = True
nic_spec.device.connectable.allowGuestControl = True
nic_spec.device.connectable.connected = False
nic_spec.device.connectable.status = 'untried'
nic_spec.device.wakeOnLanEnabled = True
nic_spec.device.addressType = 'assigned'
nic_changes.append(nic_spec)
spec.deviceChange = nic_changes
    task = vm.ReconfigVM_Task(spec=spec)
    tasks.wait_for_tasks(si, [task])
    print("NIC CARD ADDED")
def main():
args = get_args()
# connect this thing
serviceInstance = None
if args.no_ssl:
serviceInstance = SmartConnectNoSSL(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
else:
serviceInstance = SmartConnect(
host=args.host,
user=args.user,
pwd=args.password,
port=args.port)
# disconnect this thing
atexit.register(Disconnect, serviceInstance)
vm = None
if args.uuid:
search_index = serviceInstance.content.searchIndex
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_name:
content = serviceInstance.RetrieveContent()
vm = get_obj(content, [vim.VirtualMachine], args.vm_name)
if vm:
add_nic(serviceInstance, vm, args.port_group)
else:
print("VM not found")
# start this thing
if __name__ == "__main__":
main()
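# --- Hedged usage sketch ------------------------------------------------------
# Example invocation based on the arguments defined in get_args(); the host,
# user, VM name and port group are placeholders:
#
#   python add_nic_to_vm.py -s vcenter.example.com -u administrator@vsphere.local \
#       --vm-name my-vm --port-group "VM Network" --no-ssl
#
# When -p is omitted the password is prompted for interactively.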
|
pathcl/pyvmomi-community-samples
|
samples/add_nic_to_vm.py
|
Python
|
apache-2.0
| 5,081
|
#!/usr/bin/env python3
# Simplified from Python Cookbook
def flatten(items):
'''Flattens nested lists.'''
for x in items:
if isinstance(x, list):
yield from flatten(x)
else:
yield x
k = int(input('Block size: ').strip())
print('Enter a list of numbers separated by spaces')
lst = [int(n) for n in input('> ').strip().split()]
reversed_blocks = [lst[i:i+k][::-1] for i in range(0, len(lst), k)]
print(' '.join([str(x) for x in flatten(reversed_blocks)]))
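# --- Hedged example run -------------------------------------------------------
# With a block size of 3 and the list 1..8, each block is reversed in place,
# including the short trailing block:
#
#   Block size: 3
#   > 1 2 3 4 5 6 7 8
#   3 2 1 6 5 4 8 7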
|
spaceporn/dailyprogrammer-challenges
|
easy/014/014.py
|
Python
|
mit
| 507
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
##########################################################################
# Copyright (c) 2015-2017 Krell Institute. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
##########################################################################
from spack import *
class CbtfLanl(CMakePackage):
"""CBTF LANL project contains a memory tool and data center type system
command monitoring tool."""
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
version('1.8', branch='master',
git='http://git.code.sf.net/p/cbtf-lanl/cbtf-lanl')
variant('build_type', default='None', values=('None'),
description='CMake build type')
depends_on("cmake@3.0.2:", type='build')
# Dependencies for cbtf-krell
depends_on("mrnet@5.0.1:+lwthreads")
depends_on("xerces-c@3.1.1:")
depends_on("cbtf")
depends_on("cbtf-krell")
parallel = False
build_directory = 'build_cbtf_lanl'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DXERCESC_DIR=%s' % spec['xerces-c'].prefix,
            '-DCMAKE_MODULE_PATH=%s' % join_path(
                self.prefix.share, 'KrellInstitute', 'cmake')]
return cmake_args
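# --- Hedged usage sketch ------------------------------------------------------
# A typical Spack invocation for this recipe (assuming the cbtf, cbtf-krell,
# mrnet and xerces-c dependencies resolve):
#
#   spack install cbtf-lanl@1.8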
|
wscullin/spack
|
var/spack/repos/builtin/packages/cbtf-lanl/package.py
|
Python
|
lgpl-2.1
| 3,486
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a work around for various adb commands on android gce instances.
Some adb commands don't work well when the device is a cloud vm, namely
'push' and 'pull'. With gce instances, moving files through adb can be
painfully slow and hit timeouts, so the methods here just use scp instead.
"""
# pylint: disable=unused-argument
import logging
import os
import subprocess
from devil.android import device_errors
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
# SSH key file for accessing the instances. The keys are created at
# startup and removed & revoked at teardown.
_SSH_KEY_FILE = '/tmp/ssh_android_gce_instance'
class GceAdbWrapper(adb_wrapper.AdbWrapper):
def __init__(self, device_serial):
super(GceAdbWrapper, self).__init__(device_serial)
self._instance_ip = self.Shell('getprop net.gce.ip_address').strip()
#override
def Push(self, local, remote, **kwargs):
"""Pushes an object from the host to the gce instance.
Args:
local: Path on the host filesystem.
remote: Path on the instance filesystem.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
adb_wrapper.VerifyLocalFileExists(local)
if os.path.isdir(local):
self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(remote))
# When the object to be pushed is a directory, adb merges the source dir
# with the destination dir. So if local is a dir, just scp its contents.
for f in os.listdir(local):
self._PushObject(os.path.join(local, f), os.path.join(remote, f))
self.Shell('chmod 777 %s' %
cmd_helper.SingleQuote(os.path.join(remote, f)))
else:
parent_dir = remote[0:remote.rfind('/')]
if parent_dir:
self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(parent_dir))
self._PushObject(local, remote)
self.Shell('chmod 777 %s' % cmd_helper.SingleQuote(remote))
def _PushObject(self, local, remote):
"""Copies an object from the host to the gce instance using scp.
Args:
local: Path on the host filesystem.
remote: Path on the instance filesystem.
"""
cmd = [
'scp',
'-r',
'-i', _SSH_KEY_FILE,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
local,
'root@%s:%s' % (self._instance_ip, remote)
]
status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
if status:
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
#override
def Pull(self, remote, local, **kwargs):
"""Pulls a file from the gce instance to the host.
Args:
remote: Path on the instance filesystem.
local: Path on the host filesystem.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
cmd = [
'scp',
'-p',
'-r',
'-i', _SSH_KEY_FILE,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'root@%s:%s' % (self._instance_ip, remote),
local,
]
status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
if status:
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
try:
adb_wrapper.VerifyLocalFileExists(local)
except (subprocess.CalledProcessError, IOError):
logging.exception('Error when pulling files from android instance.')
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
#override
def Install(self, apk_path, forward_lock=False, reinstall=False,
sd_card=False, **kwargs):
"""Installs an apk on the gce instance
Args:
apk_path: Host path to the APK file.
forward_lock: (optional) If set forward-locks the app.
reinstall: (optional) If set reinstalls the app, keeping its data.
sd_card: (optional) If set installs on the SD card.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
adb_wrapper.VerifyLocalFileExists(apk_path)
cmd = ['install']
if forward_lock:
cmd.append('-l')
if reinstall:
cmd.append('-r')
if sd_card:
cmd.append('-s')
self.Push(apk_path, '/data/local/tmp/tmp.apk')
cmd = ['pm'] + cmd
cmd.append('/data/local/tmp/tmp.apk')
output = self.Shell(' '.join(cmd))
self.Shell('rm /data/local/tmp/tmp.apk')
if 'Success' not in output:
raise device_errors.AdbCommandFailedError(
cmd, output, device_serial=self._device_serial)
#override
@property
def is_emulator(self):
return True
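# --- Hedged usage sketch ------------------------------------------------------
# Minimal use of the wrapper above; the serial and paths are placeholders, and
# a reachable GCE-backed device plus the SSH key at _SSH_KEY_FILE are assumed:
#
#   wrapper = GceAdbWrapper('emulator-5554')
#   wrapper.Push('/tmp/test_data', '/data/local/tmp/test_data')  # scp, not adb push
#   wrapper.Install('/tmp/app.apk', reinstall=True)              # push + 'pm install -r'
#   wrapper.Pull('/data/local/tmp/log.txt', '/tmp/log.txt')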
|
js0701/chromium-crosswalk
|
build/android/devil/android/sdk/gce_adb_wrapper.py
|
Python
|
bsd-3-clause
| 4,853
|
# Copyright 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
"""Add standard attributes
Revision ID: 7a9482036ecd
Revises: 666c706fea3b
Create Date: 2018-04-04 10:12:40.399032
"""
# revision identifiers, used by Alembic.
revision = '7a9482036ecd'
down_revision = '666c706fea3b'
def upgrade():
for table in ('bgpvpns', 'bgpvpn_network_associations',
'bgpvpn_router_associations', 'bgpvpn_port_associations'):
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True))
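# --- Hedged usage sketch ------------------------------------------------------
# This expand migration is normally applied through Neutron's migration CLI;
# the exact command is an assumption based on standard sub-project tooling:
#
#   neutron-db-manage --subproject networking-bgpvpn upgrade head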
|
openstack/networking-bgpvpn
|
networking_bgpvpn/neutron/db/migration/alembic_migrations/versions/rocky/expand/7a9482036ecd_add_standard_attributes.py
|
Python
|
apache-2.0
| 1,167
|