Dataset schema (one table row per source file; column name, dtype, and the viewer's value-range stats):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | length 1 |
| author | string | length 0-175 |

---
repo_name: myrlund/tdt4275-nlp | path: /lab1/sentence_gen.py | branch_name: refs/heads/master
blob_id: 63b5cb24632edd5d430a55027fef2cb179ad50c6 | directory_id: 308b8c6e8b33d56f23029f3039d2a8d8e8f9ba1f | content_id: 2d340a1719d2daf35f78abd68ba4a70bd1fb9f60
snapshot_id: 029dd79b5366e30771f9cfd960324d538741071b | revision_id: fce13d7cbb3dbba0494cab94f64c1b9a4f7e06a6
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-19T07:09:25.870108 | revision_date: 2013-04-23T13:14:36 | committer_date: 2013-04-23T13:14:36
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 1,645
content:
#!/usr/bin/env python
# coding: utf8
import operator
import random

from loader import *


def sentence_gen(corpora, n, priority=0, length=15):
    """Generate a sentence of the given length from the most popular n-grams."""
    counted_ngrams = corpora.ngrams(n)
    # Sort n-grams by frequency, most frequent first. (The original indexed
    # sorted_ngrams[n] as if it were a dict keyed by n; with a flat sorted
    # list that extra index is dropped.)
    sorted_ngrams = sorted(counted_ngrams.items(), key=operator.itemgetter(1), reverse=True)
    words = [None]
    while len([w for w in words if w is not None]) < length:
        index = random.randint(priority, priority + 2)
        # Keep only n-grams that continue from the last emitted word.
        filtered_ngrams = [k for k in sorted_ngrams if k[0][0] == words[-1]]
        if len(words) + n >= length:
            # Close to the target length: prefer sentence-ending n-grams.
            filtered_ngrams = [item for item in filtered_ngrams if item[0][-1] is None]
        if filtered_ngrams:
            ngram = filtered_ngrams[min(index, len(filtered_ngrams) - 1)]
            words += ngram[0][1:]
        else:
            # No continuation available: end the sentence and restart.
            ngram = sorted_ngrams[index]
            words += ["."] + list(ngram[0])
    words = [w for w in words if w is not None]
    return " ".join(words)

Corpora.sentence_gen = sentence_gen

if __name__ == '__main__':
    corpora = Corpora()
    import argparse
    parser = argparse.ArgumentParser(description="Makes silly sentences.")
    parser.add_argument('-n', type=int, nargs='?', default=5, help="How many sentences?")
    parser.add_argument('-l', '--length', type=int, nargs='?', default=15, help="How long sentences?")
    args = parser.parse_args()
    for i in range(args.n):
        # NOTE: the original passed only one positional argument (i), which
        # left `priority` unset and would raise a TypeError. Reading i as a
        # per-sentence priority over a fixed bigram model is one plausible
        # intent, so `priority` now defaults to 0.
        print(corpora.sentence_gen(2, priority=i, length=args.length))
authors: ["myrlund@gmail.com"] | author: myrlund@gmail.com
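Since `loader` is not part of this row, here is a self-contained sketch of the same technique for readers who want to run something: count n-grams, rank them by frequency, and chain them on the last emitted word. The toy corpus and the `ngram_counts`/`generate` names are illustrative, not part of the repo above.

```python
import random
from collections import Counter

def ngram_counts(tokens, n):
    """Count every n-gram (as a tuple) in a token list."""
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def generate(tokens, n=2, length=8, seed=0):
    """Chain frequent n-grams whose first word matches the last word emitted."""
    rng = random.Random(seed)
    ranked = [g for g, _ in sorted(ngram_counts(tokens, n).items(), key=lambda kv: -kv[1])]
    words = list(ranked[0])  # start with the most frequent n-gram
    while len(words) < length:
        options = [g for g in ranked if g[0] == words[-1]] or ranked
        words += list(rng.choice(options[:3])[1:])
    return " ".join(words[:length])

toy = "the cat sat on the mat and the cat ran to the mat".split()
print(generate(toy))
```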

---
repo_name: Valkyrja3607/AtCoder | path: /atcoder.jp/abc083/abc083_a/Main.py | branch_name: refs/heads/master
blob_id: d3f832d3e767c0ff4bca2f5cccc70c027d529027 | directory_id: 850d778687e3692ab2a38d4d2227391d92c21e6b | content_id: b29da90d1695a2084ca06588e76990130541eb02
snapshot_id: 77e2e5e66c0e8e12bb902c35f679119c6576fad7 | revision_id: 9218a50b1eb83e4498845d15d9dda41fab90ed73
detected_licenses: [] | license_type: no_license
visit_date: 2023-07-15T20:38:52.911301 | revision_date: 2018-05-30T17:56:22 | committer_date: 2018-05-30T17:56:22
github_id: 294,980,006 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 123
content:
# AtCoder ABC083 A: compare the total weight on the two pans of a balance.
# Example: "3 8 7 1" gives 3 + 8 = 11 on the left vs 7 + 1 = 8 on the right,
# so the program prints "Left".
a, b, c, d = map(int, input().split())
if a + b > c + d:
    print("Left")
elif a + b < c + d:
    print("Right")
else:
    print("Balanced")
authors: ["purinjolly@gmail.com"] | author: purinjolly@gmail.com

---
repo_name: HacTso/mock_dojosecret_project | path: /apps/mock_dojosecret/models.py | branch_name: refs/heads/master
blob_id: 85f6c7b51b3c817448e8059efe2d0bab5d80b27b | directory_id: 1b79374fe169ffa2d8dd381d8a21f314ef0e68df | content_id: fef95460c238754cd1149f5492b69a74159239f6
snapshot_id: c85900630e4da785b4dd0a6ae6856d54843100f3 | revision_id: fa1ff570d998e3d0dc028c99e5546e40d0ec03bc
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-24T07:55:52.980273 | revision_date: 2017-06-05T04:27:49 | committer_date: 2017-06-05T04:27:49
github_id: 93,364,830 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 3,822
content:
from __future__ import unicode_literals
from django.db import models
import re
NAME_REGEX = re.compile(r'^[A-Za-z]+$')  # [A-z] also matched the punctuation between "Z" and "a"
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
import bcrypt
# Create your models here.
class UserManager(models.Manager):
def register(self,postData):
# return (True)
if len(postData['username']) < 2 or not NAME_REGEX.match(postData['username']):
return {'error' : 'No fewer than 2 characters in username and letters only'}
elif len(postData['email']) < 1:
return {'error' : 'Email cannot be blank'}
elif not EMAIL_REGEX.match(postData['email']):
return {'error' : 'Invalid email format'}
elif User.objects.filter(email=postData['email']):
return {'error' : 'Email is already registered'}
elif len(postData['password']) < 8:
return {'error' : 'Please enter at least 8 characters of password'}
# elif User.objects.filter(password=postData['password']):
# return {'error' : 'The password was used'}
elif postData['confirm_password'] != postData['password']:
            return {'error': 'The passwords do NOT match!'}
else:
reg_pw = bcrypt.hashpw(postData['password'].encode('utf-8'), bcrypt.gensalt())
return {'loginUser' : User.objects.create(username = postData['username'], email = postData['email'], password = reg_pw) }
# print reg_pw
# return {'loginUser' : User.objects.create(username = postData['username'])}
# return {'loginUser' : User.objects.create(username = postData['username'], email = postData['email'])}
def login(self, postData):
if len(postData['email']) < 1:
return {'error' : 'Email cannot be blank'}
elif not EMAIL_REGEX.match(postData['email']):
            return {'error' : 'Invalid email format'}
elif not User.objects.filter(email = postData['email']):
return {'error' : 'user does not exist.'}
elif len(postData['password']) < 8:
return {'error' : 'Please enter at least 8 characters of password'}
        else:
            user = User.objects.get(email=postData['email'])
            # bcrypt.checkpw re-hashes the candidate password with the salt
            # stored in the hash and compares the results safely; comparing
            # bytes against the stored str, as the original did, always fails.
            if not bcrypt.checkpw(postData['password'].encode(), user.password.encode()):
                return {'error' : 'Wrong password'}
            print("Success login")
            return {'loginUser' : user}
class SecretManager(models.Manager):
def secret_validation(self, posted_secret_text, user_id):
# print "step 2"
if len(posted_secret_text) <1:
# print "stpe 3"
return {'error' : "You must post a secret, otherwise........."}
else:
# print "validation true"
loginUser = User.objects.get(id = user_id)
self.create(secret_text = posted_secret_text, author = loginUser)
return {'error' : "Your secret is safe with us"}
# 'loginSecret' : Secret.objects.create(secret_text = posted_secret_text, user = User.objects.get(id = user_id))
class User(models.Model):
username = models.CharField(max_length = 255)
email = models.CharField(max_length = 255)
password = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = UserManager()
def __str__(self):
return str(self.id) + self.username + self.email + self.password
class Secret(models.Model):
secret_text = models.TextField(max_length = 1000)
    author = models.ForeignKey(User, related_name="secrets_author", on_delete=models.CASCADE)  # on_delete is required from Django 2.0 onwards
likers = models.ManyToManyField(User, related_name = "likedsecrets")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = SecretManager()
def __str__(self):
return str(self.id) + self.secret_text +str(self.author.id) + self.author.username
authors: ["hackknh@gmail.com"] | author: hackknh@gmail.com
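The register/login pair above leans entirely on bcrypt's salted hashing. Stripped of the Django scaffolding, the round-trip is just this (a minimal sketch; the password is a placeholder):

```python
import bcrypt

password = b"correct horse battery staple"
# gensalt() embeds a random salt and a cost factor into the hash itself.
hashed = bcrypt.hashpw(password, bcrypt.gensalt())

# checkpw() re-hashes the candidate with the salt recovered from `hashed`.
assert bcrypt.checkpw(password, hashed)
assert not bcrypt.checkpw(b"wrong password", hashed)
```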

---
repo_name: stigaro/ttm4115-project-team14 | path: /client/tts.py | branch_name: refs/heads/master
blob_id: 6c927fd560a5286ca585f48c71da1ae010982efc | directory_id: 6bbe5d00cb2084ddd8e2fa6cc6559abe94ef4f12 | content_id: d999b5d0ff92520dca66c3722cffb2a2d481262a
snapshot_id: 8b6cf37d60461a18ef3872c505fbf4182b4cc7d9 | revision_id: 742bf63e6a2ee4d916cddda05f193247875750cd
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-17T01:36:37.797459 | revision_date: 2021-05-01T12:57:36 | committer_date: 2021-05-01T12:57:36
github_id: 357,459,929 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 2,286
content:
from stmpy import Machine, Driver
from os import system
from gtts import gTTS
from pydub import AudioSegment
import pyaudio
import wave
class Speaker:
    def __init__(self):
        # Base name for the temporary audio files ("string.mp3" / "string.wav").
        self.audio_file_name = "string"
def speak(self, string):
# TTS
tts = gTTS(string, lang="en")
tts.save(self.audio_file_name+'.mp3')
# Convert .mp3 to .wav using ffmpeg
sound = AudioSegment.from_mp3(self.audio_file_name+'.mp3')
sound.export(self.audio_file_name+'.wav', format="wav")
# Play the .wav file using PyAudio
filename = self.audio_file_name+'.wav'
# Set chunk size of 1024 samples per data frame
chunk = 1024
# Open the sound file
wf = wave.open(filename, 'rb')
# Create an interface to PortAudio
p = pyaudio.PyAudio()
# Open a .Stream object to write the WAV file to
# 'output = True' indicates that the sound will be played rather than recorded
stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True)
# Read data in chunks
data = wf.readframes(chunk)
# Play the sound by writing the audio data to the stream
while data != b'':
stream.write(data)
data = wf.readframes(chunk)
# Close and terminate the stream
stream.close()
p.terminate()
if __name__ == "__main__":
speaker = Speaker()
t0 = {'source': 'initial', 'target': 'ready'}
t1 = {'trigger': 'speak', 'source': 'ready', 'target': 'speaking'}
t2 = {'trigger': 'done', 'source': 'speaking', 'target': 'ready'}
s1 = {'name': 'speaking', 'do': 'speak(*)', 'speak': 'defer'}
stm = Machine(name='stm', transitions=[t0, t1, t2], states=[s1], obj=speaker)
speaker.stm = stm
driver = Driver()
driver.add_machine(stm)
driver.start()
driver.send('speak', 'stm', args=['My first sentence.'])
driver.send('speak', 'stm', args=['My second sentence.'])
driver.send('speak', 'stm', args=['My third sentence.'])
driver.send('speak', 'stm', args=['My fourth sentence.'])
driver.wait_until_finished()
authors: ["shirajuki00@gmail.com"] | author: shirajuki00@gmail.com
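The playback section above is the standard blocking-PyAudio pattern and works for any WAV file. A condensed version with explicit cleanup, assuming a placeholder file `example.wav` exists:

```python
import pyaudio
import wave

def play_wav(path, chunk=1024):
    """Blocking playback: stream frames from the file straight to the device."""
    wf = wave.open(path, 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(chunk)
    while data:
        stream.write(data)
        data = wf.readframes(chunk)
    stream.stop_stream()
    stream.close()
    p.terminate()
    wf.close()

play_wav("example.wav")
```

This sketch also adds the `stop_stream()` and `wf.close()` cleanup that the original omits.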

---
repo_name: Tecquilka/uwobs | path: /common/apps/collectors/add_timestamp_and_id.py | branch_name: refs/heads/master
blob_id: 7e22d1771263f8683682167b4be8379f2aa928d4 | directory_id: e17440e48ad6cc7d69bb19395f815292a08a7388 | content_id: 17b8fd6b6c504deaf5c993e0f17bae71b892e459
snapshot_id: 70b127c14bd16339214500c1386da2b4d858ceb9 | revision_id: bf1350e1158efe571db251e39456ae4d7d95ff01
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-28T20:13:04.540963 | revision_date: 2020-10-13T09:30:09 | committer_date: 2020-10-13T09:30:09
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 416
content:
#!/usr/bin/env python
import datetime
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage: {0} IDENTIFIER\n".format(sys.argv[0]))
sys.exit(2)
identifier = sys.argv[1]  # renamed from "id" to avoid shadowing the built-in
try:
while True:
line = sys.stdin.readline()
if not line:
break
now = datetime.datetime.utcnow().isoformat()[:-3]+"Z"
print("{0}|{1}|{2}".format(now,id,line.rstrip("\n")))
except (KeyboardInterrupt, SystemExit):
pass
authors: ["fullergalway@gmail.com"] | author: fullergalway@gmail.com
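The script is built to sit in a shell pipeline (`some_collector | add_timestamp_and_id.py IDENTIFIER`), and the core transform is easy to exercise in isolation. A sketch; `stamp_line` is an illustrative helper name, not part of the file:

```python
import datetime

def stamp_line(identifier, line):
    """Prefix a line with a millisecond-precision UTC timestamp and an ID."""
    now = datetime.datetime.utcnow().isoformat()[:-3] + "Z"
    return "{0}|{1}|{2}".format(now, identifier, line.rstrip("\n"))

print(stamp_line("sensor42", "temperature=21.3\n"))
# e.g. 2020-10-13T09:30:09.123Z|sensor42|temperature=21.3
```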

---
repo_name: MySecondLanguage/django-rest-auth-email-verify-demo | path: /hipster/urls.py | branch_name: refs/heads/main
blob_id: 56c2d58dcd0b848ea888575681f0c0591b9d5bbc | directory_id: 30b8c9fcd359a7f15ad42dd7733a17e3020ff27c | content_id: ca3c62e6b4863498a6f75128e1ec1b3854a566e7
snapshot_id: cdfec9efeee7174f0c91b59ea6939813c7fd5a61 | revision_id: 4d243a471d5dfe338369eb2614215a6303022331
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-20T20:00:07.199132 | revision_date: 2021-03-08T11:08:38 | committer_date: 2021-03-08T11:08:46
github_id: 345,623,516 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 2,028
content:
"""hipster URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from rest_framework.documentation import include_docs_urls
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from rest_framework.authtoken import views
from rest_auth.views import PasswordResetConfirmView
from allauth.account.views import AccountInactiveView
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Wejhaat API')
urlpatterns = [
path('admin/', admin.site.urls),
path("api/token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
path("api/token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
    path('rest-auth/', include('rest_auth.urls')),  # forgot-password and other rest-auth API endpoints
path('rest-auth/registration/', include('rest_auth.registration.urls')), # handle the error: https://github.com/iMerica/dj-rest-auth/issues/9 # https://django-allauth.readthedocs.io/en/latest/configuration.html
path("account-inactive/", AccountInactiveView.as_view(), name="account_inactive"),
re_path(r'^rest-auth/password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path("api/docs/", include_docs_urls(title="Tourism API")),
path('doc/', schema_view),
]
authors: ["mmamunism@gmail.com"] | author: mmamunism@gmail.com
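The named `re_path` above is what password-reset emails link back to. With Django's `reverse()` the `uidb64`/`token` captures expand as sketched below; the concrete values are placeholders that merely satisfy the regex, and the call assumes this URLconf is loaded:

```python
from django.urls import reverse

url = reverse('password_reset_confirm',
              kwargs={'uidb64': 'MQ', 'token': 'abc-defghij'})
# -> '/rest-auth/password/reset/confirm/MQ/abc-defghij/'
```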

---
repo_name: domlysi/django-treenode | path: /treenode/memory.py | branch_name: refs/heads/master
blob_id: 1dbfcc3d47f3a48af022c5b19fdcc27352f4d401 | directory_id: d2b54d3df1dc8f7e88c0d209b35949089facc73f | content_id: b5c7ddd2c1dd51260daf32b36666209d52ca2176
snapshot_id: df8b08e756884bc8daffdfad7b5b3b102e92e309 | revision_id: 86e7c76e2b2d60c071cfce6ad1493b2b51f2d304
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2022-12-12T18:10:44.668904 | revision_date: 2020-08-17T11:01:09 | committer_date: 2020-08-17T11:01:09
github_id: 287,275,877 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2020-08-13T12:37:54 | gha_created_at: 2020-08-13T12:37:54 | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 522
content:
# -*- coding: utf-8 -*-
from collections import defaultdict
import weakref
# Per-class registry of live instances, held as weak references so that
# entries disappear automatically once the objects are garbage-collected.
__refs__ = defaultdict(weakref.WeakSet)
def clear_refs(cls):
__refs__[cls].clear()
def get_refs(cls):
return __refs__[cls]
def set_ref(cls, obj):
if obj.pk:
__refs__[cls].add(obj)
def update_refs(cls, data):
for obj in get_refs(cls):
obj_key = str(obj.pk)
obj_data = data.get(obj_key)
if obj_data:
for key, value in obj_data.items():
setattr(obj, key, value)
authors: ["fabio.caccamo@gmail.com"] | author: fabio.caccamo@gmail.com
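A minimal sketch of how such a WeakSet registry behaves under CPython's reference counting; `Node` and its `pk` attribute are illustrative stand-ins for a Django model instance:

```python
import weakref
from collections import defaultdict

__refs__ = defaultdict(weakref.WeakSet)

class Node:
    def __init__(self, pk):
        self.pk = pk

n = Node(pk=1)
__refs__[Node].add(n)
print(len(__refs__[Node]))  # 1: the instance is alive
del n
print(len(__refs__[Node]))  # 0: the WeakSet dropped the dead reference
```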

---
repo_name: Dheerajkarmankar/portfolio-project | path: /jobs/views.py | branch_name: refs/heads/master
blob_id: da71551170a8138c807fd10d025d929b6f0e1ef2 | directory_id: df3ac7e78ea022ecf156e3a94ae009355fdeb484 | content_id: a391b16f522cae398d76a6c782c867e5ef7f4438
snapshot_id: 6695d9db66e338d5cdaac7b213ada3c205e06ded | revision_id: b18f411f3de877d69e861e19df26e77681f70b98
detected_licenses: [] | license_type: no_license
visit_date: 2020-11-27T03:17:13.020812 | revision_date: 2019-12-20T14:50:45 | committer_date: 2019-12-20T14:50:45
github_id: 229,284,792 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 168
content:
from django.shortcuts import render
from .models import Job
def home(request):
    jobs = Job.objects  # a Manager, not a QuerySet; the template is expected to iterate jobs.all
return render(request, 'jobs/home.html', {'jobs': jobs})
authors: ["dkarmankar7@gmail.com"] | author: dkarmankar7@gmail.com

---
repo_name: bizet/xpmanager | path: /server/status.py | branch_name: refs/heads/master
blob_id: 5b17489d18983256040a969c95cfb04fe56a0ad6 | directory_id: 2fb266cb1860008187cdec533d0970183d107e58 | content_id: e35cd60a784ac5ad64e9bc96bfbc7edff3f163b8
snapshot_id: d12fe1e16bee227da825297ec4f5f7290182fa2a | revision_id: 3f5e62e894c5a4df42d610016a22179726b9d621
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-01-22T21:32:16.865883 | revision_date: 2013-11-04T08:18:35 | committer_date: 2013-11-04T08:18:35
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 188
content:
from db_info import db_xpmanager
class Status:
def __init__(self):
pass
def get(self, i):
status_options = list(db_xpmanager.select('status'))
return status_options
authors: ["bizet.cn@hotmail.com"] | author: bizet.cn@hotmail.com

---
repo_name: jminuscula/secretsanta | path: /secretsanta/__main__.py | branch_name: refs/heads/master
blob_id: 0a7df3c31e01f5af78445a98036348be5e9ad1d7 | directory_id: 2eadc45342aa161b7367cd09a2465c98d525882a | content_id: c16719c29f42d454b61cea94358ea9ef7f8717d2
snapshot_id: a99313b7ed6c5cabe161a52e89d725712e178c12 | revision_id: 0a305001a9a0ea26cf782f50ebba77256a28117a
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-12-07T20:08:12.560279 | revision_date: 2021-11-21T15:09:29 | committer_date: 2021-11-21T15:09:29
github_id: 111,299,842 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 1,613
content:
#!/usr/bin/env python3.4
import sys
import argparse
import json
from .participant import read_participants_csv
from .manager import SecretSantaDebugManager, SecretSantaDefaultManager
def get_arguments(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--email', '-e', metavar='PATH',
dest='email_config', help='Email json config',
)
parser.add_argument(
'--template', '-t',
dest='template', help='Message template'
)
parser.add_argument(
'--seed', '-s', type=int, metavar='N',
dest='seed',
required=True, help='Seed value',
)
parser.add_argument(
'--debug', '-d', action='store_true',
dest='debug',
default=False, help='Debug mode',
)
parser.add_argument(
'--participants', '-p', metavar='PATH',
dest='participants_csv_path',
required=True, help='Participants CSV (name,email) path'
)
return parser.parse_args(args)
manager = None
args = get_arguments(sys.argv[1:])
participants = read_participants_csv(args.participants_csv_path)
if args.debug:
manager = SecretSantaDebugManager(
participants,
seed=args.seed,
template="{sfrom.name} ({sfrom.email}) -> {sto.name} ({sto.email})",
)
elif args.email_config:  # args.debug is False whenever this branch is reached
config = {}
with open(args.email_config) as ec:
config = json.load(ec)
manager = SecretSantaDefaultManager(
participants,
seed=args.seed,
template_file=args.template,
email_config=config
)
if manager:
manager.run()
authors: ["jacobo.tarragon@gmail.com"] | author: jacobo.tarragon@gmail.com

---
repo_name: 0xF6/openvino | path: /model-optimizer/extensions/middle/RemoveUselessConcatSplit_test.py | branch_name: refs/heads/master
blob_id: 74cff82d3fb4d7b9313cdc8f801d09727367361a | directory_id: abbb1e132b3d339ba2173129085f252e2f3311dc | content_id: 7f91a7e4cbc1b0606f910b5a814c723cf707ab7f
snapshot_id: 56cce18f1eb448e25053fd364bcbc1da9f34debc | revision_id: 2e6c95f389b195f6d3ff8597147d1f817433cfb3
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2022-12-24T02:49:56.686062 | revision_date: 2020-09-22T16:05:34 | committer_date: 2020-09-22T16:05:34
github_id: 297,745,570 | star_events_count: 2 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-09-22T19:03:06 | gha_created_at: 2020-09-22T19:03:04 | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 16,709
content:
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from extensions.middle.RemoveUselessConcatSplit import RemoveUselessConcatSplitPattern
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
class RemoveUselessConcatSplitTests(unittest.TestCase):
def test_useless_concat_split(self):
graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph)
ref_graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br1': {'kind': 'op', 'op': None},
'split_br2': {'kind': 'op', 'op': None},
'split_br3': {'kind': 'op', 'op': None}},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'split_br1'),
('br_data_2', 'split_br2'),
('br_data_3', 'split_br3'),
])
(flag, resp) = compare_graphs(graph, ref_graph, 'split_br3')
self.assertTrue(flag, resp)
    def test_useful_concat_split(self):
graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph)
ref_graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
(flag, resp) = compare_graphs(graph, ref_graph, 'split_br3')
self.assertTrue(flag, resp)
def test_useful_concat_2_outputs_split(self):
graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'placeholder': {'kind': 'op', 'op': None},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('concat_data', 'placeholder'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph)
ref_graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'placeholder': {'kind': 'op', 'op': None},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('concat_data', 'placeholder'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
(flag, resp) = compare_graphs(graph, ref_graph, 'split_br3')
self.assertTrue(flag, resp)
def test_useless_concat_split_2_outputs(self):
graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'concat': {'kind': 'op', 'op': 'Concat'},
'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])},
'split': {'kind': 'op', 'op': 'Split'},
'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'split_br1': {'kind': 'op', 'op': None},
'split_br1_1': {'kind': 'op', 'op': None},
'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'split_br2': {'kind': 'op', 'op': None},
'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br3': {'kind': 'op', 'op': None},
},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'concat', {'in': 0}),
('br_data_2', 'concat', {'in': 1}),
('br_data_3', 'concat', {'in': 2}),
('concat', 'concat_data'),
('concat_data', 'split'),
('split', 'split_data_1', {'out': 0}),
('split', 'split_data_2', {'out': 1}),
('split', 'split_data_3', {'out': 2}),
('split_data_1', 'split_br1'),
('split_data_1', 'split_br1_1'),
('split_data_2', 'split_br2'),
('split_data_3', 'split_br3')])
RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph)
ref_graph = build_graph({'br1': {'kind': 'op', 'op': None},
'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])},
'br2': {'kind': 'op', 'op': None},
'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])},
'br3': {'kind': 'op', 'op': None},
'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])},
'split_br1': {'kind': 'op', 'op': None},
'split_br1_1': {'kind': 'op', 'op': None},
'split_br2': {'kind': 'op', 'op': None},
'split_br3': {'kind': 'op', 'op': None}},
[('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'),
('br_data_1', 'split_br1'),
('br_data_1', 'split_br1_1'),
('br_data_2', 'split_br2'),
('br_data_3', 'split_br3'),
])
(flag, resp) = compare_graphs(graph, ref_graph, 'split_br3')
self.assertTrue(flag, resp)
authors: ["alexey.suhov@intel.com"] | author: alexey.suhov@intel.com
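The pattern under test is easy to state outside the graph machinery: a Split that exactly undoes a Concat (same sizes, same order) is an identity, which is why the "useless" cases reduce to direct edges while the reordered-size case and the case where the concat output feeds another consumer keep the pair. A NumPy sketch of the equivalence the pass relies on:

```python
import numpy as np

a, b, c = np.ones((1, 26)), np.ones((1, 36)), np.ones((1, 46))
concat = np.concatenate([a, b, c], axis=1)            # shape (1, 108)
s1, s2, s3 = np.split(concat, [26, 26 + 36], axis=1)  # undo the concat
# Splitting with the original sizes in the original order is the identity,
# so the concat/split pair can be removed from the graph.
assert s1.shape == a.shape and s2.shape == b.shape and s3.shape == c.shape
assert (s1 == a).all() and (s2 == b).all() and (s3 == c).all()
```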

---
repo_name: abyravan/se3posenets-pytorch | path: /train_se3posenets.py | branch_name: refs/heads/master
blob_id: 30c2edac25e1e11650ef113d805ea63ffa3f6a75 | directory_id: d2ec03d034bddc968ab850bf5a4593087f9eafe7 | content_id: b2acac894a4ed0efd11d1ad2f16d4b81063f4585
snapshot_id: b5c61550939a84e6df3f73c3372062a266f4f711 | revision_id: d15a2db3f2e708fafd6912f877e53aec6eed5ee1
detected_licenses: [] | license_type: no_license
visit_date: 2020-05-15T13:44:13.853586 | revision_date: 2019-09-06T02:49:04 | committer_date: 2019-09-06T02:49:04
github_id: 182,308,095 | star_events_count: 16 | fork_events_count: 7
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | extension: py | is_vendor: false | is_generated: false | length_bytes: 48,573
content:
# Global imports
import os
import sys
import shutil
import time
import numpy as np
import matplotlib.pyplot as plt
import random
# Torch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
from torch.autograd import Variable
import torchvision
torch.multiprocessing.set_sharing_strategy('file_system')
# Local imports
import se3layers as se3nn
import data
import ctrlnets
import util
from util import AverageMeter, Tee, DataEnumerator
import helperfuncs as helpers
#### Setup options
# Common
import argparse
import options
parser = options.setup_comon_options()
# Loss options
parser.add_argument('--pt-wt', default=1, type=float,
metavar='WT', help='Weight for the 3D point loss - only FWD direction (default: 1)')
parser.add_argument('--use-full-jt-angles', action='store_true', default=False,
help='Use angles of all joints as inputs to the networks (default: False)')
# Define xrange
try:
a = xrange(1)
except NameError: # Not defined in Python 3.x
def xrange(*args):
return iter(range(*args))
################ MAIN
#@profile
def main():
# Parse args
global args, num_train_iter
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.batch_norm = not args.no_batch_norm
### Create save directory and start tensorboard logger
util.create_dir(args.save_dir) # Create directory
now = time.strftime("%c")
tblogger = util.TBLogger(args.save_dir + '/logs/' + now) # Start tensorboard logger
# Create logfile to save prints
logfile = open(args.save_dir + '/logs/' + now + '/logfile.txt', 'w')
backup = sys.stdout
sys.stdout = Tee(sys.stdout, logfile)
########################
############ Parse options
# Set seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# 480 x 640 or 240 x 320
if args.full_res:
print("Using full-resolution images (480x640)")
# Get default options & camera intrinsics
args.cam_intrinsics, args.cam_extrinsics, args.ctrl_ids = [], [], []
args.state_labels = []
for k in xrange(len(args.data)):
load_dir = args.data[k] #args.data.split(',,')[0]
try:
# Read from file
intrinsics = data.read_intrinsics_file(load_dir + "/intrinsics.txt")
print("Reading camera intrinsics from: " + load_dir + "/intrinsics.txt")
if args.full_res:
args.img_ht, args.img_wd = int(intrinsics['ht']), int(intrinsics['wd'])
else:
args.img_ht, args.img_wd = 240, 320 # All data except SE(2) data is at 240x320 resolution
args.img_scale = 1.0 / intrinsics['s'] # Scale of the image (use directly from the data)
# Setup camera intrinsics
sc = float(args.img_ht) / intrinsics['ht'] # Scale factor for the intrinsics
cam_intrinsics = {'fx': intrinsics['fx'] * sc,
'fy': intrinsics['fy'] * sc,
'cx': intrinsics['cx'] * sc,
'cy': intrinsics['cy'] * sc}
print("Scale factor for the intrinsics: {}".format(sc))
except:
print("Could not read intrinsics file, reverting to default settings")
args.img_ht, args.img_wd, args.img_scale = 240, 320, 1e-4
cam_intrinsics = {'fx': 589.3664541825391 / 2,
'fy': 589.3664541825391 / 2,
'cx': 320.5 / 2,
'cy': 240.5 / 2}
print("Intrinsics => ht: {}, wd: {}, fx: {}, fy: {}, cx: {}, cy: {}".format(args.img_ht, args.img_wd,
cam_intrinsics['fx'],
cam_intrinsics['fy'],
cam_intrinsics['cx'],
cam_intrinsics['cy']))
# Compute intrinsic grid & add to list
cam_intrinsics['xygrid'] = data.compute_camera_xygrid_from_intrinsics(args.img_ht, args.img_wd,
cam_intrinsics)
args.cam_intrinsics.append(cam_intrinsics) # Add to list of intrinsics
### BAXTER DATA
# Compute extrinsics
cam_extrinsics = data.read_cameradata_file(load_dir + '/cameradata.txt')
# Get dimensions of ctrl & state
try:
statelabels, ctrllabels, trackerlabels = data.read_statectrllabels_file(load_dir + "/statectrllabels.txt")
print("Reading state/ctrl joint labels from: " + load_dir + "/statectrllabels.txt")
except:
statelabels = data.read_statelabels_file(load_dir + '/statelabels.txt')['frames']
ctrllabels = statelabels # Just use the labels
trackerlabels = []
print("Could not read statectrllabels file. Reverting to labels in statelabels file")
#args.num_state, args.num_ctrl, args.num_tracker = len(statelabels), len(ctrllabels), len(trackerlabels)
#print('Num state: {}, Num ctrl: {}'.format(args.num_state, args.num_ctrl))
args.num_ctrl = len(ctrllabels)
print('Num ctrl: {}'.format(args.num_ctrl))
# Find the IDs of the controlled joints in the state vector
# We need this if we have state dimension > ctrl dimension and
# if we need to choose the vals in the state vector for the control
ctrlids_in_state = torch.LongTensor([statelabels.index(x) for x in ctrllabels])
print("ID of controlled joints in the state vector: ", ctrlids_in_state.view(1, -1))
# Add to list of intrinsics
args.cam_extrinsics.append(cam_extrinsics)
args.ctrl_ids.append(ctrlids_in_state)
args.state_labels.append(statelabels)
# Data noise
if not hasattr(args, "add_noise_data") or (len(args.add_noise_data) == 0):
args.add_noise_data = [False for k in xrange(len(args.data))] # By default, no noise
else:
assert(len(args.data) == len(args.add_noise_data))
if hasattr(args, "add_noise") and args.add_noise: # BWDs compatibility
args.add_noise_data = [True for k in xrange(len(args.data))]
# Get mean/std deviations of dt for the data
if args.mean_dt == 0:
args.mean_dt = args.step_len * (1.0 / 30.0)
        args.std_dt = 0.005  # i.e. +/- 5 ms
print("Using default mean & std.deviation based on the step length. Mean DT: {}, Std DT: {}".format(
args.mean_dt, args.std_dt))
else:
exp_mean_dt = (args.step_len * (1.0 / 30.0))
assert ((args.mean_dt - exp_mean_dt) < 1.0 / 30.0), \
"Passed in mean dt ({}) is very different from the expected value ({})".format(
args.mean_dt, exp_mean_dt) # Make sure that the numbers are reasonable
print("Using passed in mean & std.deviation values. Mean DT: {}, Std DT: {}".format(
args.mean_dt, args.std_dt))
# Image suffix
args.img_suffix = '' if (args.img_suffix == 'None') else args.img_suffix # Workaround since we can't specify empty string in the yaml
print('Ht: {}, Wd: {}, Suffix: {}, Num ctrl: {}'.format(args.img_ht, args.img_wd, args.img_suffix, args.num_ctrl))
# Read mesh ids and camera data (for baxter)
args.baxter_labels = data.read_statelabels_file(args.data[0] + '/statelabels.txt')
args.mesh_ids = args.baxter_labels['meshIds']
# SE3 stuff
assert (args.se3_type in ['se3euler', 'se3aa', 'se3quat', 'affine', 'se3spquat', 'se3aar']), 'Unknown SE3 type: ' + args.se3_type
args.delta_pivot = ''
print('Predicting {} SE3s of type: {}'.format(args.num_se3, args.se3_type))
# Sequence stuff
print('Step length: {}, Seq length: {}'.format(args.step_len, args.seq_len))
# Loss parameters
print('Loss scale: {}, Loss weights => PT: {}, CONSIS: {}'.format(
args.loss_scale, args.pt_wt, args.consis_wt))
# Weight sharpening stuff
if args.use_wt_sharpening:
print('Using weight sharpening to encourage binary mask prediction. Start iter: {}, Rate: {}, Noise stop iter: {}'.format(
args.sharpen_start_iter, args.sharpen_rate, args.noise_stop_iter))
# Loss type
norm_motion = ', Normalizing loss based on GT motion' if args.motion_norm_loss else ''
print('3D loss type: ' + args.loss_type + norm_motion)
# Wide model
if args.wide_model:
print('Using a wider network!')
if args.use_jt_angles:
print("Using Jt angles as input to the pose encoder")
if args.use_jt_angles_trans:
print("Using Jt angles as input to the transition model")
# DA threshold / winsize
print("Flow/visibility computation. DA threshold: {}, DA winsize: {}".format(args.da_threshold,
args.da_winsize))
if args.use_only_da_for_flows:
print("Computing flows using only data-associations. Flows can only be computed for visible points")
else:
print("Computing flows using tracker poses. Can get flows for all input points")
########################
############ Load datasets
# Get datasets
load_color = None
if args.reject_left_motion:
print("Examples where any joint of the left arm moves by > 0.005 radians inter-frame will be discarded. \n"
"NOTE: This test will be slow on any machine where the data needs to be fetched remotely")
if args.reject_right_still:
print("Examples where no joint of the right arm move by > 0.015 radians inter-frame will be discarded. \n"
"NOTE: This test will be slow on any machine where the data needs to be fetched remotely")
if args.add_noise:
print("Adding noise to the depths, actual configs & ctrls")
print("Baxter dataset")
valid_filter = lambda p, n, st, se, slab: data.valid_data_filter(p, n, st, se, slab,
mean_dt=args.mean_dt, std_dt=args.std_dt,
reject_left_motion=args.reject_left_motion,
reject_right_still=args.reject_right_still)
read_seq_func = data.read_baxter_sequence_from_disk
### Noise function
#noise_func = lambda d, c: data.add_gaussian_noise(d, c, std_d=0.02,
# scale_d=True, std_j=0.02) if args.add_noise else None
noise_func = lambda d: data.add_edge_based_noise(d, zthresh=0.04, edgeprob=0.35,
defprob=0.005, noisestd=0.005)
### Load functions
baxter_data = data.read_recurrent_baxter_dataset(args.data, args.img_suffix,
step_len = args.step_len, seq_len = args.seq_len,
train_per = args.train_per, val_per = args.val_per,
valid_filter = valid_filter,
cam_extrinsics=args.cam_extrinsics,
cam_intrinsics=args.cam_intrinsics,
ctrl_ids=args.ctrl_ids,
state_labels=args.state_labels,
add_noise=args.add_noise_data)
disk_read_func = lambda d, i: read_seq_func(d, i, img_ht = args.img_ht, img_wd = args.img_wd,
img_scale = args.img_scale, ctrl_type = args.ctrl_type,
num_ctrl=args.num_ctrl,
#num_state=args.num_state,
mesh_ids = args.mesh_ids,
#ctrl_ids=ctrlids_in_state,
#camera_extrinsics = args.cam_extrinsics,
#camera_intrinsics = args.cam_intrinsics,
compute_bwdflows=False,
#num_tracker=args.num_tracker,
dathreshold=args.da_threshold, dawinsize=args.da_winsize,
use_only_da=args.use_only_da_for_flows,
noise_func=noise_func,
load_color=load_color) # Need BWD flows / masks if using GT masks
train_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'train') # Train dataset
val_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'val') # Val dataset
test_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'test') # Test dataset
print('Dataset size => Train: {}, Validation: {}, Test: {}'.format(len(train_dataset), len(val_dataset), len(test_dataset)))
# Create a data-collater for combining the samples of the data into batches along with some post-processing
if args.evaluate:
# Load only test loader
args.imgdisp_freq = 10 * args.disp_freq # Tensorboard log frequency for the image data
sampler = torch.utils.data.dataloader.SequentialSampler(test_dataset) # Run sequentially along the test dataset
# torch.manual_seed(args.seed)
# if args.cuda:
# torch.cuda.manual_seed(args.seed)
# sampler = torch.utils.data.dataloader.RandomSampler(test_dataset) # Random sampler
test_loader = DataEnumerator(util.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, sampler=sampler,
pin_memory=args.use_pin_memory,
collate_fn=test_dataset.collate_batch))
else:
# Create dataloaders (automatically transfer data to CUDA if args.cuda is set to true)
train_loader = DataEnumerator(util.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=args.use_pin_memory,
collate_fn=train_dataset.collate_batch))
val_loader = DataEnumerator(util.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=args.use_pin_memory,
collate_fn=val_dataset.collate_batch))
########################
############ Load models & optimization stuff
assert not args.use_full_jt_angles, "Can only use as many jt angles as the control dimension"
print('Using state of controllable joints')
args.num_state_net = args.num_ctrl # Use only the jt angles of the controllable joints
### Load the model
num_train_iter = 0
num_input_channels = 3 # Num input channels
modelfn = ctrlnets.MultiStepSE3PoseModel
model = modelfn(num_ctrl=args.num_ctrl, num_se3=args.num_se3,
se3_type=args.se3_type, delta_pivot=args.delta_pivot,
input_channels=num_input_channels, use_bn=args.batch_norm, nonlinearity=args.nonlin,
init_posese3_iden=args.init_posese3_iden, init_transse3_iden=args.init_transse3_iden,
use_wt_sharpening=args.use_wt_sharpening, sharpen_start_iter=args.sharpen_start_iter,
sharpen_rate=args.sharpen_rate, pre_conv=args.pre_conv, decomp_model=args.decomp_model,
local_delta_se3=args.local_delta_se3,
wide=args.wide_model, use_jt_angles=args.use_jt_angles,
use_jt_angles_trans=args.use_jt_angles_trans, num_state=args.num_state_net,
full_res=args.full_res, noise_stop_iter=args.noise_stop_iter,
trans_type="default", posemask_type="default")
if args.cuda:
model.cuda() # Convert to CUDA if enabled
### Load optimizer
optimizer = helpers.load_optimizer(args.optimization, model.parameters(), lr=args.lr,
momentum=args.momentum, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
# TODO: Save path to TB log dir, save new log there again
# TODO: Reuse options in args (see what all to use and what not)
# TODO: Use same num train iters as the saved checkpoint
# TODO: Print some stats on the training so far, reset best validation loss, best epoch etc
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
loadargs = checkpoint['args']
args.start_epoch = checkpoint['epoch']
if args.reset_train_iter:
num_train_iter = 0 # Reset to 0
else:
num_train_iter = checkpoint['train_iter']
try:
model.load_state_dict(checkpoint['state_dict']) # BWDs compatibility (TODO: remove)
except:
model.load_state_dict(checkpoint['model_state_dict'])
assert (loadargs.optimization == args.optimization), "Optimizer in saved checkpoint ({}) does not match current argument ({})".format(
loadargs.optimization, args.optimization)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print("=> loaded checkpoint '{}' (epoch {}, train iter {})"
.format(args.resume, checkpoint['epoch'], num_train_iter))
best_loss = checkpoint['best_loss'] if 'best_loss' in checkpoint else float("inf")
best_floss = checkpoint['best_flow_loss'] if 'best_flow_loss' in checkpoint else float("inf")
best_fcloss = checkpoint['best_flowconsis_loss'] if 'best_flowconsis_loss' in checkpoint else float("inf")
best_epoch = checkpoint['best_epoch'] if 'best_epoch' in checkpoint else 0
best_fepoch = checkpoint['best_flow_epoch'] if 'best_flow_epoch' in checkpoint else 0
best_fcepoch = checkpoint['best_flowconsis_epoch'] if 'best_flowconsis_epoch' in checkpoint else 0
print('==== Best validation loss: {} was from epoch: {} ===='.format(best_loss, best_epoch))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
best_loss, best_floss, best_fcloss = float("inf"), float("inf"), float("inf")
best_epoch, best_fepoch, best_fcepoch = 0, 0, 0
########################
############ Test (don't create the data loader unless needed, creates 4 extra threads)
if args.evaluate:
# Delete train and val loaders
#del train_loader, val_loader
# TODO: Move this to before the train/val loader creation??
print('==== Evaluating pre-trained network on test data ===')
test_stats = iterate(test_loader, model, tblogger, len(test_loader), mode='test')
# Save final test error
helpers.save_checkpoint({
'args': args,
'test_stats': {'stats': test_stats,
'niters': test_loader.niters, 'nruns': test_loader.nruns,
'totaliters': test_loader.iteration_count(),
'ids': test_stats.data_ids,
},
}, False, savedir=args.save_dir, filename='test_stats.pth.tar')
# Close log file & return
logfile.close()
return
## Create a file to log different validation errors over training epochs
statstfile = open(args.save_dir + '/epochtrainstats.txt', 'w')
statsvfile = open(args.save_dir + '/epochvalstats.txt', 'w')
statstfile.write("Epoch, Loss, Ptloss, Consisloss, Flowerrsum, Flowerravg, Consiserr\n")
statsvfile.write("Epoch, Loss, Ptloss, Consisloss, Flowerrsum, Flowerravg, Consiserr\n")
########################
############ Train / Validate
args.imgdisp_freq = 5 * args.disp_freq # Tensorboard log frequency for the image data
train_ids, val_ids = [], []
for epoch in range(args.start_epoch, args.epochs):
# Adjust learning rate
adjust_learning_rate(optimizer, epoch, args.lr_decay, args.decay_epochs, args.min_lr)
# Train for one epoch
train_stats = iterate(train_loader, model, tblogger, args.train_ipe,
mode='train', optimizer=optimizer, epoch=epoch+1)
train_ids += train_stats.data_ids
# Evaluate on validation set
val_stats = iterate(val_loader, model, tblogger, args.val_ipe,
mode='val', epoch=epoch+1)
val_ids += val_stats.data_ids
# Find best losses
val_loss, val_floss, val_fcloss = val_stats.loss.avg, \
val_stats.ptloss.avg.sum(), \
val_stats.ptloss.avg.sum() + val_stats.consisloss.avg.sum()
is_best, is_fbest, is_fcbest = (val_loss < best_loss), (val_floss < best_floss), (val_fcloss < best_fcloss)
prev_best_loss, prev_best_floss, prev_best_fcloss = best_loss, best_floss, best_fcloss
prev_best_epoch, prev_best_fepoch, prev_best_fcepoch = best_epoch, best_fepoch, best_fcepoch
s, sf, sfc = 'SAME', 'SAME', 'SAME'
if is_best:
best_loss, best_epoch, s = val_loss, epoch+1, 'IMPROVED'
if is_fbest:
best_floss, best_fepoch, sf = val_floss, epoch+1, 'IMPROVED'
if is_fcbest:
best_fcloss, best_fcepoch, sfc = val_fcloss, epoch+1, 'IMPROVED'
print('==== [LOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
epoch+1, s, prev_best_loss, prev_best_epoch, best_loss, best_epoch))
print('==== [FLOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
epoch+1, sf, prev_best_floss, prev_best_fepoch, best_floss, best_fepoch))
        print('==== [FCLOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
            epoch+1, sfc, prev_best_fcloss, prev_best_fcepoch, best_fcloss, best_fcepoch))
# Write losses to stats file
statstfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(epoch+1, train_stats.loss.avg,
train_stats.ptloss.avg.sum(),
train_stats.consisloss.avg.sum(),
train_stats.flowerr_sum.avg.sum()/args.batch_size,
train_stats.flowerr_avg.avg.sum()/args.batch_size,
train_stats.consiserr.avg.sum()))
statsvfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(epoch + 1, val_stats.loss.avg,
val_stats.ptloss.avg.sum(),
val_stats.consisloss.avg.sum(),
val_stats.flowerr_sum.avg.sum() / args.batch_size,
val_stats.flowerr_avg.avg.sum() / args.batch_size,
val_stats.consiserr.avg.sum()))
# Save checkpoint
helpers.save_checkpoint({
'epoch': epoch+1,
'args' : args,
'best_loss' : best_loss,
'best_flow_loss' : best_floss,
'best_flowconsis_loss' : best_fcloss,
'best_epoch' : best_epoch,
'best_flow_epoch' : best_fepoch,
'best_flowconsis_epoch': best_fcepoch,
'train_stats': {'stats': train_stats,
'niters': train_loader.niters, 'nruns': train_loader.nruns,
'totaliters': train_loader.iteration_count(),
'ids': train_ids,
},
'val_stats' : {'stats': val_stats,
'niters': val_loader.niters, 'nruns': val_loader.nruns,
'totaliters': val_loader.iteration_count(),
'ids': val_ids,
},
'train_iter' : num_train_iter,
'model_state_dict' : model.state_dict(),
'optimizer_state_dict' : optimizer.state_dict(),
}, is_best, is_fbest, is_fcbest, savedir=args.save_dir, filename='checkpoint.pth.tar') #_{}.pth.tar'.format(epoch+1))
print('\n')
# Delete train and val data loaders
del train_loader, val_loader
# Load best model for testing (not latest one)
print("=> loading best model from '{}'".format(args.save_dir + "/model_flow_best.pth.tar"))
checkpoint = torch.load(args.save_dir + "/model_flow_best.pth.tar")
num_train_iter = checkpoint['train_iter']
try:
model.load_state_dict(checkpoint['state_dict']) # BWDs compatibility (TODO: remove)
except:
model.load_state_dict(checkpoint['model_state_dict'])
print("=> loaded best checkpoint (epoch {}, train iter {})"
.format(checkpoint['epoch'], num_train_iter))
best_epoch = checkpoint['best_epoch'] if 'best_epoch' in checkpoint else 0
best_fepoch = checkpoint['best_flow_epoch'] if 'best_flow_epoch' in checkpoint else 0
best_fcepoch = checkpoint['best_flowconsis_epoch'] if 'best_flowconsis_epoch' in checkpoint else 0
print('==== Best validation loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_loss'],
best_epoch))
print('==== Best validation flow loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flow_loss'],
best_fepoch))
print('==== Best validation flow-consis loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flowconsis_loss'],
best_fcepoch))
# Do final testing (if not asked to evaluate)
# (don't create the data loader unless needed, creates 4 extra threads)
print('==== Evaluating trained network on test data ====')
args.imgdisp_freq = 10 * args.disp_freq # Tensorboard log frequency for the image data
sampler = torch.utils.data.dataloader.SequentialSampler(test_dataset) # Run sequentially along the test dataset
test_loader = DataEnumerator(util.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, sampler=sampler, pin_memory=args.use_pin_memory,
collate_fn=test_dataset.collate_batch))
test_stats = iterate(test_loader, model, tblogger, len(test_loader),
mode='test', epoch=args.epochs)
print('==== Best validation loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_loss'],
best_epoch))
print('==== Best validation flow loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flow_loss'],
best_fepoch))
print('==== Best validation flow-consis loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flowconsis_loss'],
best_fcepoch))
# Save final test error
helpers.save_checkpoint({
'args': args,
'test_stats': {'stats': test_stats,
'niters': test_loader.niters, 'nruns': test_loader.nruns,
'totaliters': test_loader.iteration_count(),
'ids': test_stats.data_ids,
},
}, is_best=False, savedir=args.save_dir, filename='test_stats.pth.tar')
# Write test stats to val stats file at the end
statsvfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(checkpoint['epoch'], test_stats.loss.avg,
test_stats.ptloss.avg.sum(),
test_stats.consisloss.avg.sum(),
test_stats.flowerr_sum.avg.sum() / args.batch_size,
test_stats.flowerr_avg.avg.sum() / args.batch_size,
test_stats.consiserr.avg.sum()))
statsvfile.close(); statstfile.close()
# Close log file
logfile.close()
################# HELPER FUNCTIONS
### Main iterate function (train/test/val)
def iterate(data_loader, model, tblogger, num_iters,
mode='test', optimizer=None, epoch=0):
# Get global stuff?
global num_train_iter
# Setup avg time & stats:
data_time, fwd_time, bwd_time, viz_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
# Save all stats into a namespace
stats = argparse.Namespace()
stats.loss, stats.ptloss, stats.consisloss = AverageMeter(), AverageMeter(), AverageMeter()
stats.flowerr_sum, stats.flowerr_avg = AverageMeter(), AverageMeter()
stats.motionerr_sum, stats.motionerr_avg = AverageMeter(), AverageMeter()
stats.stillerr_sum, stats.stillerr_avg = AverageMeter(), AverageMeter()
stats.consiserr = AverageMeter()
stats.data_ids = []
if mode == 'test':
# Save the flow errors and poses if in "testing" mode
stats.motion_err, stats.motion_npt, stats.still_err, stats.still_npt = [], [], [], []
stats.predposes, stats.predtransposes, stats.preddeltas, stats.ctrls = [], [], [], []
stats.poses = []
# stats.predmasks, stats.masks = [], []
# stats.gtflows, stats.predflows = [], []
# stats.pts = []
# Switch model modes
train = (mode == 'train')
if train:
assert (optimizer is not None), "Please pass in an optimizer if we are iterating in training mode"
model.train()
else:
        assert (mode == 'test' or mode == 'val'), "Mode can be train/test/val. Input: " + mode
model.eval()
# Create a closure to get the outputs of the delta-se3 prediction layers
#predictions = {}
#def get_output(name):
# def hook(self, input, result):
# predictions[name] = result
# return hook
#model.transitionmodel.deltase3decoder.register_forward_hook(get_output('deltase3'))
# Point predictor
# NOTE: The prediction outputs of both layers are the same if mask normalization is used, if sigmoid the outputs are different
# NOTE: Gradients are same for pts & tfms if mask normalization is used, always different for the masks
ptpredlayer = se3nn.NTfm3D()
# Type of loss (mixture of experts = wt sharpening or sigmoid)
mex_loss = True
# Run an epoch
print('========== Mode: {}, Starting epoch: {}, Num iters: {} =========='.format(
mode, epoch, num_iters))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pt_wt, consis_wt = args.pt_wt * args.loss_scale, args.consis_wt * args.loss_scale
identfm = util.req_grad(torch.eye(4).view(1,1,4,4).expand(1,args.num_se3-1,4,4).narrow(2,0,3).to(device), False)
for i in xrange(num_iters):
# ============ Load data ============#
# Start timer
start = time.time()
# Get a sample
j, sample = data_loader.next()
stats.data_ids.append(sample['id'].clone())
# Get inputs and targets (as variables)
# Currently batchsize is the outer dimension
pts = util.req_grad(sample['points'].to(device), train) # Need gradients
ctrls = util.req_grad(sample['controls'].to(device), train) # Need gradients
fwdflows = util.req_grad(sample['fwdflows'].to(device), False) # No gradients
fwdvis = util.req_grad(sample['fwdvisibilities'].float().to(device), False)
# Get jt angles
jtangles = util.req_grad(sample['actctrlconfigs'].to(device), train) #[:, :, args.ctrlids_in_state].type(deftype), requires_grad=train)
# Measure data loading time
data_time.update(time.time() - start)
# ============ FWD pass + Compute loss ============#
# Start timer
start = time.time()
########## Run a FWD pass through the network
# Predict the poses and masks
pose0, initmask = model.forward_pose_mask([pts[:, 0], jtangles[:, 0]], train_iter=num_train_iter)
pose1 = model.forward_only_pose([pts[:, 1], jtangles[:, 1]])
poses = [pose0, pose1]
# Make next-pose predictions & corresponding 3D point predictions using the transition model
deltapose, transpose = model.forward_next_pose(pose0, ctrls[:, 0], jtangles[:, 0], None)
deltaposes = [deltapose]
transposes = [transpose]
# Make prediction of next pts
nextpts = ptpredlayer(pts[:,0], initmask, deltapose)
predpts = [nextpts]
########## Losses
### 3D loss
# If motion-normalized loss, pass in GT flows
inputs = nextpts - pts[:, 0] # Delta flow for that step (note that gradients only go to the mask & deltas)
targets = fwdflows[:, 0]
if args.motion_norm_loss:
motion = targets # Use either delta-flows or full-flows
currptloss = pt_wt * ctrlnets.MotionNormalizedLoss3D(inputs, targets, motion=motion,
loss_type=args.loss_type, wts=fwdvis[:, 0])
else:
currptloss = pt_wt * ctrlnets.Loss3D(inputs, targets, loss_type=args.loss_type, wts=fwdvis[:, 0])
### Consistency loss (between t & t+1)
# Poses from encoder @ t & @ t+1 should be separated by delta from t->t+1
# NOTE: For the consistency loss, the loss is only backpropagated to the encoder poses, not to the deltas
delta = deltapose.detach() # Break the graph here
nextpose_trans = se3nn.ComposeRtPair()(delta, poses[0])
currconsisloss = consis_wt * ctrlnets.BiMSELoss(nextpose_trans, poses[1])
# Append to total loss
loss = currptloss + currconsisloss
ptloss = torch.Tensor([currptloss.item()])
consisloss = torch.Tensor([currconsisloss.item()])
# Update stats
stats.ptloss.update(ptloss)
stats.consisloss.update(consisloss)
stats.loss.update(loss.item())
# Measure FWD time
fwd_time.update(time.time() - start)
# ============ Gradient backpass + Optimizer step ============#
# Compute gradient and do optimizer update step (if in training mode)
if (train):
# Start timer
start = time.time()
# Backward pass & optimize
optimizer.zero_grad() # Zero gradients
loss.backward() # Compute gradients - BWD pass
optimizer.step() # Run update step
# Increment number of training iterations by 1
num_train_iter += 1
# Measure BWD time
bwd_time.update(time.time() - start)
# ============ Visualization ============#
# Make sure to not add to the computation graph (will memory leak otherwise)!
with torch.no_grad():
# Start timer
start = time.time()
# Compute flow predictions and errors
# NOTE: I'm using CUDA here to speed up computation by ~4x
predflows = torch.cat([(x - pts[:,0]).unsqueeze(1) for x in predpts], 1)
flows = fwdflows
if args.use_only_da_for_flows:
# If using only DA then pts that are not visible will not have GT flows, so we shouldn't take them into
# account when computing the flow errors
flowerr_sum, flowerr_avg, \
motionerr_sum, motionerr_avg,\
stillerr_sum, stillerr_avg,\
motion_err, motion_npt,\
still_err, still_npt = helpers.compute_masked_flow_errors(predflows * fwdvis, flows) # Zero out flows for non-visible points
else:
flowerr_sum, flowerr_avg, \
motionerr_sum, motionerr_avg, \
stillerr_sum, stillerr_avg, \
motion_err, motion_npt, \
still_err, still_npt = helpers.compute_masked_flow_errors(predflows, flows)
# Update stats
stats.flowerr_sum.update(flowerr_sum); stats.flowerr_avg.update(flowerr_avg)
stats.motionerr_sum.update(motionerr_sum); stats.motionerr_avg.update(motionerr_avg)
stats.stillerr_sum.update(stillerr_sum); stats.stillerr_avg.update(stillerr_avg)
if mode == 'test':
stats.motion_err.append(motion_err); stats.motion_npt.append(motion_npt)
stats.still_err.append(still_err); stats.still_npt.append(still_npt)
# Save poses if in test mode
if (mode == 'test') and (args.detailed_test_stats):
stats.predposes.append([x.cpu().float() for x in poses])
stats.predtransposes.append([x.cpu().float() for x in transposes])
stats.preddeltas.append([x.cpu().float() for x in deltaposes])
stats.ctrls.append(ctrls.cpu().float())
stats.poses.append(sample['poses'])
# stats.predmasks.append(initmask.cpu().float())
# stats.masks.append(sample['masks'][:,0])
# stats.predflows.append(predflows.cpu())
# stats.gtflows.append(flows.cpu())
# stats.pts.append(sample['points'][:,0])
# Compute flow error per mask (if asked to)
#if args.disp_err_per_mask:
# flowloss_mask_sum_fwd, flowloss_mask_avg_fwd, _, _ = compute_flow_errors_per_mask(predflows,
# flows,
# sample['gtmasks'])
### Pose consistency error
# Compute consistency error for display
consiserror, consiserrormax = torch.zeros(args.seq_len), torch.zeros(args.seq_len)
for k in xrange(args.seq_len):
consiserrormax[k] = (poses[k+1] - transposes[k]).abs().max()
consiserror[k] = ctrlnets.BiAbsLoss(poses[k+1], transposes[k])
stats.consiserr.update(consiserror)
# Display/Print frequency
bsz = pts.size(0)
if i % args.disp_freq == 0:
### Print statistics
print_stats(mode, epoch=epoch, curr=i+1, total=num_iters,
samplecurr=j+1, sampletotal=len(data_loader),
stats=stats, bsz=bsz)
### Print stuff if we have weight sharpening enabled
if args.use_wt_sharpening:
try:
noise_std, pow = model.posemaskmodel.compute_wt_sharpening_stats(train_iter=num_train_iter)
except:
noise_std, pow = model.maskmodel.compute_wt_sharpening_stats(train_iter=num_train_iter)
print('\tWeight sharpening => Num training iters: {}, Noise std: {:.4f}, Power: {:.3f}'.format(
num_train_iter, noise_std, pow))
### Print time taken
print('\tTime => Data: {data.val:.3f} ({data.avg:.3f}), '
'Fwd: {fwd.val:.3f} ({fwd.avg:.3f}), '
'Bwd: {bwd.val:.3f} ({bwd.avg:.3f}), '
'Viz: {viz.val:.3f} ({viz.avg:.3f})'.format(
data=data_time, fwd=fwd_time, bwd=bwd_time, viz=viz_time))
### TensorBoard logging
# (1) Log the scalar values
iterct = data_loader.iteration_count() # Get total number of iterations so far
info = {
mode+'-loss': loss.item(),
mode+'-pt3dloss': ptloss.sum(),
mode+'-consisloss': consisloss.sum(),
mode+'-consiserr': consiserror.sum(),
mode+'-consiserrmax': consiserrormax.sum(),
mode+'-flowerrsum': flowerr_sum.sum()/bsz,
mode+'-flowerravg': flowerr_avg.sum()/bsz,
mode+'-motionerrsum': motionerr_sum.sum()/bsz,
mode+'-motionerravg': motionerr_avg.sum()/bsz,
mode+'-stillerrsum': stillerr_sum.sum() / bsz,
mode+'-stillerravg': stillerr_avg.sum() / bsz,
}
if mode == 'train':
info[mode+'-lr'] = args.curr_lr # Plot current learning rate
for tag, value in info.items():
tblogger.scalar_summary(tag, value, iterct)
# (2) Log images & print predicted SE3s
# TODO: Numpy or matplotlib
if i % args.imgdisp_freq == 0:
## Log the images (at a lower rate for now)
id = random.randint(0, sample['points'].size(0)-1)
# Render the predicted and GT poses onto the depth
depths = []
for k in xrange(args.seq_len+1):
gtpose = sample['poses'][id, k]
predpose = poses[k][id].cpu().float()
predposet = transposes[k-1][id].cpu().float() if (k > 0) else None
gtdepth = helpers.normalize_img(sample['points'][id,k,2:].expand(3,args.img_ht,args.img_wd).permute(1,2,0), min=0, max=3)
for n in xrange(args.num_se3):
# Pose_1 (GT/Pred)
if n < gtpose.size(0):
util.draw_3d_frame(gtdepth, gtpose[n], [0,0,1], args.cam_intrinsics[0], pixlength=15.0) # GT pose: Blue
util.draw_3d_frame(gtdepth, predpose[n], [0,1,0], args.cam_intrinsics[0], pixlength=15.0) # Pred pose: Green
if predposet is not None:
util.draw_3d_frame(gtdepth, predposet[n], [1,0,0], args.cam_intrinsics[0], pixlength=15.0) # Transition model pred pose: Red
depths.append(gtdepth)
depthdisp = torch.cat(depths, 1).permute(2,0,1) # Concatenate along columns (3 x 240 x 320*seq_len+1 image)
# Concat the flows, depths and masks into one tensor
flowdisp = torchvision.utils.make_grid(torch.cat([flows.narrow(0,id,1),
predflows.narrow(0,id,1)], 0).cpu().view(-1, 3, args.img_ht, args.img_wd),
nrow=args.seq_len, normalize=True, range=(-0.01, 0.01))
#depthdisp = torchvision.utils.make_grid(sample['points'][id].narrow(1,2,1), normalize=True, range=(0.0,3.0))
maskdisp = torchvision.utils.make_grid(torch.cat([initmask.narrow(0,id,1)], 0).cpu().view(-1, 1, args.img_ht, args.img_wd),
nrow=args.num_se3, normalize=True, range=(0,1))
# Show as an image summary
info = { mode+'-depths': util.to_np(depthdisp.unsqueeze(0)),
mode+'-flows' : util.to_np(flowdisp.unsqueeze(0)),
mode+'-masks' : util.to_np(maskdisp.narrow(0,0,1))
}
for tag, images in info.items():
tblogger.image_summary(tag, images, iterct)
## Print the predicted delta-SE3s
#deltase3s = predictions['deltase3'][id].view(args.num_se3, -1).cpu()
#if len(pivots) > 0:
# deltase3s = torch.cat([deltase3s, pivots[-1][id].view(args.num_se3,-1).cpu()], 1)
#print('\tPredicted delta-SE3s @ t=2:', deltase3s)
## Print the predicted mask values
print('\tPredicted mask stats:')
for k in xrange(args.num_se3):
print('\tMax: {:.4f}, Min: {:.4f}, Mean: {:.4f}, Std: {:.4f}, Median: {:.4f}, Pred 1: {}'.format(
initmask[id,k].max(), initmask[id,k].min(), initmask[id,k].mean(),
initmask[id,k].std(), initmask[id,k].view(-1).cpu().float().median(),
(initmask[id,k] - 1).abs().le(1e-5).sum()))
print('')
# Measure viz time
viz_time.update(time.time() - start)
### Print stats at the end
print('========== Mode: {}, Epoch: {}, Final results =========='.format(mode, epoch))
print_stats(mode, epoch=epoch, curr=num_iters, total=num_iters,
samplecurr=data_loader.niters+1, sampletotal=len(data_loader),
stats=stats)
print('========================================================')
# Return the loss & flow loss
return stats
### Print statistics
def print_stats(mode, epoch, curr, total, samplecurr, sampletotal,
stats, bsz=None):
# Print loss
bsz = args.batch_size if bsz is None else bsz
print('Mode: {}, Epoch: [{}/{}], Iter: [{}/{}], Sample: [{}/{}], Batch size: {}, '
'Loss: {loss.val:.4f} ({loss.avg:.4f})'.format(
mode, epoch, args.epochs, curr, total, samplecurr,
sampletotal, bsz, loss=stats.loss))
# Print flow loss per timestep
for k in xrange(args.seq_len):
print('\tStep: {}, Pt: {:.3f} ({:.3f}), '
'Consis: {:.3f}/{:.4f} ({:.3f}/{:.4f}), '
'Flow => Sum: {:.3f} ({:.3f}), Avg: {:.3f} ({:.3f}), '
'Motion/Still => Sum: {:.3f}/{:.3f}, Avg: {:.3f}/{:.3f}'
.format(
1 + k * args.step_len,
stats.ptloss.val[k], stats.ptloss.avg[k],
stats.consisloss.val[k], stats.consisloss.avg[k],
stats.consiserr.val[k], stats.consiserr.avg[k],
stats.flowerr_sum.val[k] / bsz, stats.flowerr_sum.avg[k] / bsz,
stats.flowerr_avg.val[k] / bsz, stats.flowerr_avg.avg[k] / bsz,
stats.motionerr_sum.avg[k] / bsz, stats.stillerr_sum.avg[k] / bsz,
stats.motionerr_avg.avg[k] / bsz, stats.stillerr_avg.avg[k] / bsz,
))
### Adjust learning rate
def adjust_learning_rate(optimizer, epoch, decay_rate=0.1, decay_epochs=10, min_lr=1e-5):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (decay_rate ** (epoch // decay_epochs))
lr = min_lr if (args.lr < min_lr) else lr # Clamp at min_lr
print("======== Epoch: {}, Initial learning rate: {}, Current: {}, Min: {} =========".format(
epoch, args.lr, lr, min_lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
args.curr_lr = lr
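# A worked example of the step-decay schedule above (illustrative numbers,
# assuming args.lr = 0.1, decay_rate = 0.1, decay_epochs = 10, min_lr = 1e-5):
#   epochs 0-9   -> lr = 0.1 * 0.1**0 = 0.1
#   epochs 10-19 -> lr = 0.1 * 0.1**1 = 0.01
#   epochs 40-49 -> lr = 0.1 * 0.1**4 = 1e-5
#   epochs 50+   -> the decayed value falls below min_lr and is clamped to 1e-5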
################ RUN MAIN
if __name__ == '__main__':
main()
|
[
"barun@MacBook-Air-2.local"
] |
barun@MacBook-Air-2.local
|
24bb0afa161eea35fad82d7423ec8767db87fdaf
|
a4cb0495dec84d433b00d275a63afcd3f5c296b8
|
/ResNet/layers/IdentityBlock.py
|
992b515223b88207f9b51268c65d2c3dd2395fb7
|
[
"MIT"
] |
permissive
|
panuthept/Models_Corpus
|
370aa5054145ecb20312360e33217c7a743f327b
|
6d9e91eb97829e73d88ecfc4754492f6324ef383
|
refs/heads/master
| 2023-05-25T10:09:40.600017
| 2020-01-11T14:17:44
| 2020-01-11T14:17:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
import tensorflow as tf
class IdentityBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, depth=2):
super().__init__()
if isinstance(filters, list):
assert len(filters) == depth, "Length of filters must match the layer depth."
if isinstance(kernel_size, list):
assert len(kernel_size) == depth, "Length of kernel_size must match the layer depth."
self.depth = depth
self.filters = filters if isinstance(filters, list) else [filters for _ in range(depth)]
self.kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size for _ in range(depth)]
self.conv2s = [tf.keras.layers.Conv2D(self.filters[i], self.kernel_size[i], padding="same") for i in range(depth)]
self.bns = [tf.keras.layers.BatchNormalization() for _ in range(depth)]
def call(self, input_tensor, training=False):
x = input_tensor
for i in range(len(self.conv2s) - 1):
x = self.conv2s[i](x)
x = self.bns[i](x, training=training)
x = tf.nn.relu(x)
x = self.conv2s[-1](x)
x = self.bns[-1](x, training=training)
x += input_tensor
x = tf.nn.relu(x)
return x
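# --- Usage sketch (not part of the original file; a minimal check assuming
# TF 2.x). The input channel count must equal the last filter count so the
# residual addition `x += input_tensor` is shape-compatible.
if __name__ == "__main__":
    block = IdentityBlock(filters=64, kernel_size=3, depth=2)
    inputs = tf.random.normal((1, 32, 32, 64))  # NHWC, 64 channels to match filters
    outputs = block(inputs, training=False)
    print(outputs.shape)  # expected: (1, 32, 32, 64)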
|
[
"falcon_270394@hotmail.co.th"
] |
falcon_270394@hotmail.co.th
|
e1d829b54b662af945088f6706746423887e21c6
|
123e7109e676d612481368fbf42c09e7490b83ca
|
/仪器使用/PCI6289采集卡/program/pci6289_ao_test.py
|
076b265d46fd57a77f26750a74dc05f9a6a93829
|
[] |
no_license
|
Kaige213/QuantumTransportExperiment
|
ada6c6999cef079113557e4087f653bfdaf135b5
|
0703c61f72e1490682c5ce2b9a8bb82a545c3730
|
refs/heads/main
| 2021-11-27T04:59:13.617922
| 2021-09-22T13:28:37
| 2021-09-22T13:28:37
| 251,171,218
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
import nidaqmx
with nidaqmx.Task() as task:
task.ao_channels.add_ao_voltage_chan("PCI6289/ao0",min_val=-1, max_val=1)
task.write(0.5)
|
[
"yuexiaokai@outlook.com"
] |
yuexiaokai@outlook.com
|
e713e3b650aa8bcd75898ce8b9478770267dcfe3
|
af9d9043a83a751f00f7b805533d87ccce330d21
|
/Portfolio/Datawiz/test_task/test_task/settings.py
|
934b25bd3018fa1bfebe472ca3678cf757ebf827
|
[] |
no_license
|
HeCToR74/Python
|
e664b79593a92daa7d39d402f789812dfc59c19f
|
f448ec0453818d55c5c9d30aaa4f19e1d7ca5867
|
refs/heads/master
| 2023-03-08T13:44:19.961694
| 2022-07-03T19:23:25
| 2022-07-03T19:23:25
| 182,556,680
| 1
| 1
| null | 2023-02-28T15:30:01
| 2019-04-21T16:26:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,363
|
py
|
"""
Django settings for test_task project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_==i0*m4vvexpe%vrgp39%!ay9*^)*8)giv=ur671jbma*21o@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_task.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
f6256f707e0944be2fc80c00428ef105ef18d54a
|
44eb88d1e3d9aa4bab66eaa9e096082500906cf5
|
/075.py
|
8620cadcc6d21386ae6ae2f3e010ef04a049611c
|
[
"MIT"
] |
permissive
|
souza-joao/cursoemvideo-python3
|
82f00d6ca7f9bfc201af796ebfc76eed500bc39d
|
b9f747d1083c1c779630b25b321b291d76611901
|
refs/heads/main
| 2023-07-16T09:28:58.441664
| 2021-08-31T15:41:59
| 2021-08-31T15:41:59
| 376,969,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
num = (int(input('Enter a number: ')),
int(input('Enter a number: ')),
int(input('Enter a number: ')),
int(input('Enter a number: ')),)
print(f'The number 9 appeared {num.count(9)} time(s).')
if 3 in num:
print(f'The number 3 appears at position {num.index(3) + 1}.')
else:
print('You did not type the number 3.')
print('The even numbers were ', end='')
for c in num:
if c % 2 == 0:
print(c, end=' ')
'''n1 = int(input('Enter a number: '))
n2 = int(input('Enter another number: '))
n3 = int(input('Enter another number: '))
n4 = int(input('Enter one more number: '))
tup = (n1, n2, n3, n4)
print(f'The number 9 appeared {tup.count(9)} times.')
print('-' * 60)
if tup.count(3) > 0:
print(f'The number 3 is at position {tup.index(3) + 1}.')
else:
print(f'The number 3 was not typed.')
print('-' * 60)
print('The even values were ', end='')
if tup[0] % 2 == 0:
print(f'{tup[0]} ', end='')
if tup[1] % 2 == 0:
print(f'{tup[1]} ', end='')
if tup[2] % 2 == 0:
print(f'{tup[2]} ', end='')
if tup[3] % 2 == 0:
print(f'{tup[3]}')'''
|
[
"85567361+souza-joao@users.noreply.github.com"
] |
85567361+souza-joao@users.noreply.github.com
|
254fb14e235ff8c88fb37c0a2d8073e8cd9249a5
|
f474d500b7da4f4069e24fddcde97783a4f3664b
|
/vagrantEnv/lib/python3.5/encodings/kz1048.py
|
8c13cbaa6892121f32a575a95efa7e9b71c453ad
|
[
"Apache-2.0"
] |
permissive
|
Thanh-Lai/chat-bot
|
220a0fd6383181f0cdaf732b5c02f645bd960a28
|
e3007fa6e034d3cccff4615a7eccf0e75bbc1708
|
refs/heads/master
| 2020-04-23T09:39:04.509356
| 2019-02-18T04:56:25
| 2019-02-18T04:56:25
| 171,075,880
| 0
| 0
|
Apache-2.0
| 2019-02-18T04:56:26
| 2019-02-17T03:00:39
|
Python
|
UTF-8
|
Python
| false
| false
| 38
|
py
|
/usr/lib/python3.5/encodings/kz1048.py
|
[
"tlai1122@gmail.com"
] |
tlai1122@gmail.com
|
925028b08297779546c047873b5ba67c870ad692
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/59.py
|
09da2bcfaa4c411daa5449e6b502ef93033a8f6c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
#!/usr/bin/env python
import sys
filename=sys.argv[1]
inputfile=file(filename, 'r')
numcases=int(inputfile.readline().strip())
for case in range(1,numcases+1):
R, k, N = map(long, inputfile.readline().strip().split())
g = map(long, inputfile.readline().strip().split())
y = 0
first_ride = [None] * N
ride_groups = [None] * N
ride_seats = [None] * N
ride = 0
start = 0
while ride < R:
if first_ride[start] is not None:
break
ridestart = start
seats = 0
groups = 0
while seats + g[start] <= k and groups < N:
seats += g[start]
groups += 1
start += 1
if start >= N:
start = 0
if start == ridestart:
break
first_ride[ridestart] = ride
ride_groups[ridestart] = groups
ride_seats[ridestart] = seats
ride += 1
y += seats
if ride < R:
cyclelen = ride - first_ride[start]
if R - ride >= cyclelen:
cycles = (R - ride) / cyclelen
cycle_euros = 0
cycle_start = start
while True:
cycle_euros += ride_seats[start]
start = (start + ride_groups[start]) % N
ride += 1
if start == cycle_start:
break
y += cycle_euros * cycles
ride += (cycles - 1) * cyclelen
while ride < R:
y += ride_seats[start]
start = (start + ride_groups[start]) % N
ride += 1
print "Case #%d: %d" % (case, y)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6d8c2fe636110f1a7126213981d239f0d387291c
|
185820587b6a0663f8aac706384fa7e00b236bb9
|
/samples/python/clusqmgr_suspend.py
|
af9c8f5fe63ae203a40c03995e0ed6d08bba2407
|
[
"MIT"
] |
permissive
|
hlkiltas/mqweb
|
9c70252d6af442e45de3a18726b69dcda65db94a
|
c651014765018d32b5d49054ff8c36618d50093d
|
refs/heads/master
| 2020-03-27T01:51:56.858586
| 2017-09-22T12:11:20
| 2017-09-22T12:11:20
| 145,749,223
| 2
| 0
|
MIT
| 2018-08-22T18:46:21
| 2018-08-22T18:46:20
| null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
'''
Suspends a queue manager in a cluster.
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
if len(sys.argv) < 3:
print 'Please pass me the name of a queue manager and a cluster name as arguments'
sys.exit(1)
url = "/api/clusqmgr/suspend/" + sys.argv[1] + '/' + sys.argv[2]
try:
conn = httplib.HTTPConnection('localhost', 8081)
conn.request('GET', url)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code'])
)
else:
print "Suspend command successfully sent."
except httplib.HTTPException as e:
print ('An HTTP error occurred while suspending the queue manager in the cluster: ' +
str(e) # e.errno/e.strerror are not reliably set on HTTPException and would fail to concatenate
)
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
|
[
"franky.braem@gmail.com"
] |
franky.braem@gmail.com
|
6f990446cead9603dedfef48a68c2ec8c67a8073
|
e970c48f83f74e6c28d51b556c47ae07f18f22fa
|
/code/sampling_methods.py
|
d0af413c62743488539e35d91194acfa1ad2c58d
|
[] |
no_license
|
Loopdiloop/fys-stk4155-project2
|
ddd812c3eaea525fa6b959d39391f1ece7877332
|
9efb4fb92581faa64fda188a1a600a47fc4bb594
|
refs/heads/master
| 2020-09-06T00:35:33.097309
| 2019-11-14T00:13:16
| 2019-11-14T00:13:16
| 220,260,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,147
|
py
|
import sys
import numpy as np
import statistical_functions as statistics
from fit_matrix import fit
from functions import franke_function
import copy
class sampling():
def __init__(self, inst):
self.inst = inst
def kfold_cross_validation(self, k, method, deg=5, lambd=1):
"""Method that implements the k-fold cross-validation algorithm. It takes
as input the method we want to use. if "least squares" an ordinary OLS will be evaulated.
if "ridge" then the ridge method will be used, and respectively the same for "lasso"."""
inst = self.inst
lowest_mse = 1e5
self.mse = []
self.R2 = []
self.mse_train = []
self.R2_train = []
self.bias = []
self.variance = []
design_matrix = fit(inst)
whole_DM = design_matrix.create_design_matrix(deg = deg).copy() #design matrix for the whole dataset
whole_z = inst.z_1d.copy() #save the whole output
for i in range(self.inst.k):
#pick the i-th set as test
inst.sort_training_test_kfold(i)
inst.fill_array_test_training()
design_matrix.create_design_matrix(deg = deg) #create design matrix for the training set, and evaluate
if method == "least squares":
z_train, beta_train = design_matrix.fit_design_matrix_numpy()
elif method == "ridge":
z_train, beta_train = design_matrix.fit_design_matrix_ridge(lambd)
elif method == "lasso":
z_train, beta_train = design_matrix.fit_design_matrix_lasso(lambd)
else:
sys.exit("Wrongly designated method: {} not found".format(method))
#Find out which values get predicted by the training set
X_test = design_matrix.create_design_matrix(x=inst.test_x_1d, y=inst.test_y_1d, z=inst.test_z_1d, N=inst.N_testing, deg=deg)
z_pred = design_matrix.test_design_matrix(beta_train, X=X_test)
#Take the real values from the dataset for comparison
z_test = inst.test_z_1d
#Calculate the prediction for the whole dataset
whole_z_pred = design_matrix.test_design_matrix(beta_train, X=whole_DM)
# Statistically evaluate the training set with test and predicted solution.
mse, calc_r2 = statistics.calc_statistics(z_test, z_pred)
# Statistically evaluate the training set with itself
mse_train, calc_r2_train = statistics.calc_statistics(inst.z_1d, z_train)
# Get the values for the bias and the variance
bias, variance = statistics.calc_bias_variance(z_test, z_pred)
self.mse.append(mse)
self.R2.append(calc_r2)
self.mse_train.append(mse_train)
self.R2_train.append(calc_r2_train)
self.bias.append(bias)
self.variance.append(variance)
# If needed/wanted:
if abs(mse) < lowest_mse:
lowest_mse = abs(mse)
self.best_predicting_beta = beta_train
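# --- Usage sketch (illustrative only; `inst` stands for whatever dataset
# object the rest of this project constructs, with k, z_1d, the test arrays
# etc. already populated; it is defined elsewhere, so this is an assumption):
# sampler = sampling(inst)
# sampler.kfold_cross_validation(k=5, method="ridge", deg=5, lambd=0.1)
# print("CV MSE: %.4f +/- %.4f" % (np.mean(sampler.mse), np.std(sampler.mse)))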
|
[
"mamoll@uio.no"
] |
mamoll@uio.no
|
79ec05a20274e044cf8660379e72fb5c8f2319f4
|
405bc4aee79e340a3b41f0f8e3f143118caa7cf6
|
/hcipy/atmosphere/standard_atmosphere.py
|
1442788a1ad31dcd2827ffc4c897f06293d2af69
|
[
"MIT"
] |
permissive
|
rahulbhadani/hcipy
|
c6c2fd487974ef7534a68f2a5036dc418609fc0a
|
b52726cb9502b5225ddff9d7b1ff417f2350cda8
|
refs/heads/master
| 2020-07-06T07:23:43.688016
| 2019-08-02T12:00:28
| 2019-08-02T12:00:28
| 202,938,491
| 0
| 0
|
MIT
| 2019-08-17T22:58:06
| 2019-08-17T22:58:05
| null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from .infinite_atmospheric_layer import InfiniteAtmosphericLayer
from .atmospheric_model import MultiLayerAtmosphere
import numpy as np
def make_standard_atmospheric_layers(input_grid, L0=10):
heights = np.array([500, 1000, 2000, 4000, 8000, 16000])
velocities = np.array([10, 10, 10, 10, 10, 10])
Cn_squared = np.array([0.2283, 0.0883, 0.0666, 0.1458, 0.3350, 0.1350]) * 1e-12
layers = []
for h, v, cn in zip(heights, velocities, Cn_squared):
layers.append(InfiniteAtmosphericLayer(input_grid, cn, L0, v, h, 2))
return layers
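# --- Usage sketch (not in the original file) ---
# Combining the layers with the MultiLayerAtmosphere imported above; the exact
# call signature is an assumption based on hcipy's usual pattern:
# pupil_grid = ... # an hcipy grid covering the telescope pupil
# layers = make_standard_atmospheric_layers(pupil_grid, L0=10)
# atmosphere = MultiLayerAtmosphere(layers)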
|
[
"por@strw.leidenuniv.nl"
] |
por@strw.leidenuniv.nl
|
3e1738529ae55e62ae6843901eca2eb0d436e07a
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractIntheinkpotfictionWordpressCom.py
|
5a22827f09f4623da612321d5379b4873ab2b614
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 576
|
py
|
def extractIntheinkpotfictionWordpressCom(item):
'''
Parser for 'intheinkpotfiction.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
44891d7b9820b9884945e121c98a187821ebb72e
|
ef92b7c2ade5905ada8da7a55e7708f318017a9f
|
/test/__init__.py
|
8c7f1088a0651c40bbc94c63dbc66ca8a0cad193
|
[] |
no_license
|
nolanbconaway/raspi-nyc-train-display
|
1bba781b6420c1e96a29279caeb14b6ade04d918
|
4865629fb97788d649dfd75a27622b5cfa02638b
|
refs/heads/master
| 2020-07-06T13:52:19.100199
| 2020-04-06T02:06:29
| 2020-04-06T02:06:29
| 203,038,726
| 0
| 0
| null | 2020-04-06T02:06:30
| 2019-08-18T17:56:57
|
Python
|
UTF-8
|
Python
| false
| false
| 309
|
py
|
"""Test package."""
def epoch_to_datetime(epoch):
"""Convert epoch time into a datetime in NYC timezone."""
import datetime
import pytz
return (
pytz.timezone("UTC")
.localize(datetime.datetime.utcfromtimestamp(epoch))
.astimezone(pytz.timezone("US/Eastern"))
)
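# Worked example (epoch 0 is 1970-01-01 00:00:00 UTC; US/Eastern is UTC-5 in winter):
#   >>> epoch_to_datetime(0).strftime("%Y-%m-%d %H:%M %Z")
#   '1969-12-31 19:00 EST'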
|
[
"nolanbconaway@gmail.com"
] |
nolanbconaway@gmail.com
|
856fd644e4807971a8bf3571c1cb7ce181e1716d
|
72ae28eaff4a6f8239392409069782bf9a827a68
|
/setup.py
|
4ff5f8dc5cac880a51e4e283a489bfc630c258c1
|
[] |
no_license
|
GargiVyas31/Elena
|
d8030547475ca90a1cb745b5c762b51af12bfe92
|
98453b42219d16826a6461135bfdd7bbc4cf91ec
|
refs/heads/master
| 2023-01-24T00:45:16.900630
| 2020-11-24T22:50:03
| 2020-11-24T22:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
import os
from setuptools import setup, find_packages
import src
pkg_scripts = []
setup(
name=src.__name__,
long_description='ELena',
version='1.0.0',
platforms=['Any'],
scripts=pkg_scripts,
provides=[],
namespace_packages=[],
packages=find_packages(),
include_package_data=True,
package_data={'src' : []},
zip_safe=False,
)
|
[
"sayantan@hp.com"
] |
sayantan@hp.com
|
90feabc37881c6074fed4004e3e610421ad0bb4e
|
b76947d520fb1f867ed7165403cbcc47caabd5cb
|
/hivemind_powerball/hive/apps.py
|
7c2876c4b0a631b48c502bb4c5060e5326ea5b64
|
[] |
no_license
|
peter-stratton/hivemind_powerball
|
abf3b0c21a3e92be45e6271d701ea1ce628960cc
|
f1e650bb26c23a0c42b05ac6826105bade0d8d3b
|
refs/heads/master
| 2021-01-22T02:17:45.452114
| 2017-02-09T17:42:02
| 2017-02-09T17:42:02
| 81,042,442
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.apps import AppConfig
class HiveConfig(AppConfig):
"""Config object for the hive app"""
name = 'hive'
|
[
"pstratton@jornaya.com"
] |
pstratton@jornaya.com
|
0a77844aeb0ff0c47c3b8ac2e106430f4a5deb65
|
9e3eef34a21a60610c4dc9f5c5fec809a8cb7706
|
/lab2/ex1.py
|
d1c06a877a652f35276089666fd5e8bca69c1930
|
[] |
no_license
|
toma-ungureanu/FII-Python
|
f7df993a985f82dfed793f7549852abbb191bdee
|
60a9b657db7aa09b7e9d42402f4689ca7df615ac
|
refs/heads/master
| 2020-08-28T23:40:22.382485
| 2020-01-22T04:28:35
| 2020-01-22T04:28:35
| 217,855,104
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
def printFibonacciNumbers(n):
f1 = 0
f2 = 1
if n < 1:
return
for x in range(0, n):
print(f2, end=" ")
nxt = f1 + f2 # renamed from `next` to avoid shadowing the built-in
f1 = f2
f2 = nxt
# Driver code
printFibonacciNumbers(int(input("Primele x numere Fibonacci: ")))
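# Worked example: printFibonacciNumbers(5) prints "1 1 2 3 5", since each pass
# prints f2 before advancing the pair (f1, f2) to (f2, f1 + f2).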
|
[
"toma.florin.ungureanu@gmail.com"
] |
toma.florin.ungureanu@gmail.com
|
c510739d0552b39eefb1a37c3464536b93358cc2
|
e0c26c8e9ffc94dd5b1f1757230fd8779dc244bc
|
/cell/synapse/__init__.py
|
21aed928dd6e1bfc544e87bc35ed7278a595e4e9
|
[] |
no_license
|
mpelko/neurovivo
|
6804e381a78a03164785bf6fa24f84f123a1201f
|
10923140da270693988313b36ad6d0ad42bf529a
|
refs/heads/master
| 2016-09-09T18:55:59.331807
| 2014-03-25T18:01:38
| 2014-03-25T18:01:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
from exp2syn_AMPA_synapse import *
from exp2syn_GABA_synapse import *
|
[
"mpelko@gmail.com"
] |
mpelko@gmail.com
|
716beeecaa9af980ba1af4c2e9ae8ada8085d998
|
86fb27e01bb1f2d203252aad4e375fba853f30ee
|
/KNN.py
|
5ad9ccbdbb3e3cd1351e56d405d81e1cc96aefcc
|
[] |
no_license
|
Bhuvaneshwari-Anand/Kirana
|
e0e8a1c49732ae5e19bc2459db7efd423ce8b5ff
|
d3651b7e6c0e8390c1659e6d404f506e5201c797
|
refs/heads/main
| 2023-01-12T21:38:05.905860
| 2020-11-19T15:01:08
| 2020-11-19T15:01:08
| 314,262,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
import math
import pandas
import numpy as np
#the dimensions of the item as input
x = float(input())
y = float(input())
#reading the data from the csv file
data = pandas.read_csv(r"F:\Contest\Infra Mind\Excel and CSV\DatasetFinal.csv") # raw string avoids accidental escape sequences
#store the dataset items in separate arrays for each column
iden = data["ID"].values
item = data["Item"].values
height = data["Height"].values
width = data["Width"].values
#the array data are now stored in separate lists
pid = np.array(iden).tolist()
pitem = np.array(item).tolist()
h = np.array(height).tolist()
w = np.array(width).tolist()
# n is the number of items present in the dataset
n = len(pitem)
#declare a list to store the error range
RangeErr = []
#calculate the error range for each item with the given input and store in the list
for i in range(0,n):
temp = ((h[i] - x)**2) + ((w[i] - y)**2)
temp1 = math.sqrt(temp)
RangeErr.append(temp1)
#position of the output
position = RangeErr.index(min(RangeErr))
#print the outputs
print("The item is ",end="")
print(pitem[position])
print("The product ID is ",end="")
print(pid[position])
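# Worked example of the distance computed in the loop above: for an input
# (x, y) = (2, 3) and a catalogue item with height 5 and width 7,
# temp = (5 - 2)**2 + (7 - 3)**2 = 9 + 16 = 25, so the RangeErr entry is 5.0;
# the item with the smallest such Euclidean distance wins.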
|
[
"bhuvaneshwari119anand@gmail.com"
] |
bhuvaneshwari119anand@gmail.com
|
eeb5073afecbaf0f35097a0d4970f139fc0282fd
|
014e9a6f3d48ffa7b9ee759904d2e33284a6f4d6
|
/api/caoloapi/model/auth.py
|
c73941f6992e52e8c9728cbae96791221e95e3a7
|
[
"MIT"
] |
permissive
|
kissmikijr/caolo-backend
|
33c0262239182b96d1215677c45065b4ef90455b
|
efec05bb793bd40951cb4e5ae4e930d972f63d36
|
refs/heads/master
| 2023-09-04T01:09:50.068148
| 2021-10-18T22:00:59
| 2021-10-18T22:06:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
from datetime import datetime as dt, timedelta
from passlib.context import CryptContext
from jose import jwt
SECRET_KEY = "fe9fb923daa2a5c34a57b6da5d807a1e9cb48d4afee5c10095bab37bcf860059"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
PEPPER_RANGE = (128, 139, 3)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def __concatpw(pw: str, salt: str, pepper):
return f"{pw}{salt}{pepper}"
def verifypw(plain, salt, pepper, hashed_pw):
pw = __concatpw(plain, salt, pepper)
return pwd_context.verify(pw, hashed_pw)
def hashpw(pw: str, salt: str, pepper):
return pwd_context.hash(__concatpw(pw, salt, pepper))
def create_access_token(data: dict):
payload = data.copy()
payload.update({"exp": dt.utcnow() + timedelta(minutes=15)})
return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)
def decode_access_token(token: str):
"""
raises jose.JWTError or AssertionError on invalid token
"""
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
assert "sub" in payload
return payload
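# --- Usage sketch (not part of the original module): a minimal round-trip.
# The token carries an "exp" claim 15 minutes out, and decode_access_token
# asserts that "sub" is present.
if __name__ == "__main__":
    token = create_access_token({"sub": "alice"})
    print(decode_access_token(token)["sub"])  # prints: alice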
|
[
"littlesnorrboy@gmail.com"
] |
littlesnorrboy@gmail.com
|
ad73927538d2a6b51e3e9da4eaa96818ced5e08a
|
f714db4463dd37fc33382364dc4b1963a9053e49
|
/tests/sentry/event_manager/interfaces/test_frame.py
|
22dd3b8b5756050429bafb0bd12c3db6daa422ae
|
[
"BUSL-1.1",
"Apache-2.0"
] |
permissive
|
macher91/sentry
|
92171c2ad23564bf52627fcd711855685b138cbd
|
dd94d574403c95eaea6d4ccf93526577f3d9261b
|
refs/heads/master
| 2021-07-07T08:23:53.339912
| 2020-07-21T08:03:55
| 2020-07-21T08:03:55
| 140,079,930
| 0
| 0
|
BSD-3-Clause
| 2020-05-13T11:28:35
| 2018-07-07T11:50:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
from sentry import eventstore
from sentry.event_manager import EventManager
@pytest.fixture
def make_frames_snapshot(insta_snapshot):
def inner(data):
mgr = EventManager(data={"stacktrace": {"frames": [data]}})
mgr.normalize()
evt = eventstore.create_event(data=mgr.get_data())
frame = evt.interfaces["stacktrace"].frames[0]
insta_snapshot({"errors": evt.data.get("errors"), "to_json": frame.to_json()})
return inner
@pytest.mark.parametrize(
"input",
[
{"filename": 1},
{"filename": "foo", "abs_path": 1},
{"function": 1},
{"module": 1},
{"function": "?"},
],
)
def test_bad_input(make_frames_snapshot, input):
make_frames_snapshot(input)
@pytest.mark.parametrize(
"x", [float("inf"), float("-inf"), float("nan")], ids=["inf", "neginf", "nan"]
)
def test_context_with_nan(make_frames_snapshot, x):
make_frames_snapshot({"filename": "x", "vars": {"x": x}})
def test_address_normalization(make_frames_snapshot):
make_frames_snapshot(
{
"lineno": 1,
"filename": "blah.c",
"function": "main",
"instruction_addr": 123456,
"symbol_addr": "123450",
"image_addr": "0x0",
}
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
15487621d75896236eb3ebe106a4f8748a6a389b
|
e43b78db4ff598944e58e593610f537f3833d79c
|
/py-faster-rcnn/lib/roi_data_layer/roidb.py
|
93f713e1f127d432736a654ce6fa292eef3b6c67
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
ZJUZQ/Net_caffe
|
577e9b3e80a391d772a21c27639465d539fceb1f
|
bed3c7384a259339c5a0fb2ea34fa0cdd32ddd29
|
refs/heads/master
| 2021-09-08T12:19:37.039970
| 2018-03-09T14:44:24
| 2018-03-09T14:44:24
| 114,853,721
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,356
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
import numpy as np
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
import PIL
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in xrange(imdb.num_images)]
roidb = imdb.roidb
# roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
for i in xrange(len(imdb.image_index)):
roidb[i]['image'] = imdb.image_path_at(i)
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes ## gt class that had the max overlap (columns)
roidb[i]['max_overlaps'] = max_overlaps ## max overlap with gt over classes (columns)
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
assert len(roidb) > 0
assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?'
num_images = len(roidb)
# Infer number of classes from the number of columns in gt_overlaps
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in xrange(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = \
_compute_targets(rois, overlaps=max_overlaps, labels=max_classes) # Compute bounding-box regression targets for an image
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED: ## RPN can only use precomputed normalization because there are no fixed statistics to compute a priori
# Use fixed / precomputed "means" and "stds" instead of empirical values
means = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1)) # shape = (num_classes, 4)
stds = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1)) # shape = (num_classes, 4)
else:
# Compute values needed for means and stds
# var(x) = E(x^2) - E(x)^2
class_counts = np.zeros((num_classes, 1)) + cfg.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)
squared_sums[cls, :] += \
(targets[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
print 'bbox target means:'
print means
print means[1:, :].mean(axis=0) # ignore bg class
print 'bbox target stdevs:'
print stds
print stds[1:, :].mean(axis=0) # ignore bg class
# Normalize targets
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
print "Normalizing targets"
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]
else:
print "NOT normalizing targets"
# These values will be needed for making predictions
# (the predicts will need to be unnormalized and uncentered)
return means.ravel(), stds.ravel() ## Return a contiguous flattened array
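# Numeric check of the identity used above, var(x) = E(x^2) - E(x)^2:
# for x = [1, 2, 3], E(x) = 2 and E(x^2) = 14/3, so var = 14/3 - 4 = 2/3,
# which matches the population variance of [1, 2, 3].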
def _compute_targets(rois, overlaps, labels):
"""Compute bounding-box regression targets for an image."""
"""
overlaps: max_overlaps of rois
labels: max_classes of rois
return:
[[cls, dx, dy, dw, dh]
...
]
"""
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
# Fail if the image has no ground-truth ROIs
return np.zeros((rois.shape[0], 5), dtype=np.float32)
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0] ## e.g., 0.5
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(
np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois) # compute [dx, dy, dw, dh]
return targets
|
[
"zhouqiang@zju.edu.cn"
] |
zhouqiang@zju.edu.cn
|
f0612efb0fad45a35627608742c836a5c0fd909d
|
ce7c86b8c9a193d421d7e67b794d81f65ef92c1c
|
/Day10/day_10.py
|
4e9b016b695e356eaf9c9bea80808fcfd277dca2
|
[] |
no_license
|
Panagiotis-Zachos/advent-of-code-2020
|
a00f9728c4e332940b23f0c3ab904183914971b5
|
b305b900eed2d96a1997ee58450d9a11aa3137bf
|
refs/heads/main
| 2023-02-11T04:11:41.908362
| 2021-01-02T18:46:11
| 2021-01-02T18:46:11
| 324,453,531
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
adapters = list(map(int, open('input.txt').read().split('\n')))
adapters.sort()
# Part 1
dif1 = 1
dif3 = 1
for i in range(1, len(adapters)):
dif = adapters[i] - adapters[i-1]
if dif == 1: dif1 += 1
elif dif == 3: dif3 += 1
print(dif3*dif1)
# Part 2.1
adapters.append(adapters[-1] + 3)
adapters.insert(0, 0)
paths = [0 for i in range(len(adapters))]
paths[0] = 1
for i in range(1, len(adapters)):
paths[i] = paths[i - 1]
if i > 1 and adapters[i] - adapters[i - 2] <= 3:
paths[i] += paths[i - 2]
if i > 2 and adapters[i] - adapters[i - 3] <= 3:
paths[i] += paths[i - 3]
print(paths[-1])
# Part 2.2
curr = 0
p1 = 1
p2 = 0
p3 = 0
for i in range(1, len(adapters)):
curr = p1
if i > 1 and adapters[i] - adapters[i - 2] <= 3:
curr += p2
if i > 2 and adapters[i] - adapters[i - 3] <= 3:
curr += p3
p3, p2, p1 = (p2, p1, curr)
print(curr)
|
[
"noreply@github.com"
] |
noreply@github.com
|
a1e88c8713d1e262f21e9358122e6e5749a99712
|
6472d60797abbb0d2814495943d2aaabb2baf7d9
|
/lecture/lecture.py
|
641025b9fad157e83284bbacdec0cc8df25da8ed
|
[] |
no_license
|
maya-salcedo/webcam-motion-detector
|
4e23336b36a56138becd7b7a636fac38d4c9a8d4
|
8746cd3af917b5e6af74cc7cfea6a69a5a715a43
|
refs/heads/master
| 2023-01-06T17:38:38.566764
| 2020-11-10T19:15:32
| 2020-11-10T19:15:32
| 311,289,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import cv2
img = cv2.imread("galaxy.jpg", 0) # pass 1 to read the image as is,
# 0 for grayscale (a single band),
# -1 to keep transparency capabilities
print(type(img))
print(img)
print(img.shape)
print(img.ndim)
resized_image = cv2.resize(img, (int(img.shape[1]/2),int(img.shape[0]/2))) #the tuple parameter is the new dimension
cv2.imshow("Galaxy", resized_image)
cv2.imwrite("Galaxy_resized.jpg", resized_image) #method to save the new img
cv2.waitKey(0) # 0: the window closes when the user presses any key
# 2000: would wait 2 seconds instead
cv2.destroyAllWindows() # method to close the window
|
[
"maya.salcedo07@gmail.com"
] |
maya.salcedo07@gmail.com
|
914431d0adb4b2c7286505f6d7737d82868ba8a8
|
699737150c95c92d2acbdf612e931ca6ccd8ba81
|
/a116_buggy_image_ar_version_29-31.py
|
86269fa5d5c1116396cd0cdcc4dd13e0c1e34d6e
|
[] |
no_license
|
AntonioR0211/buggyimage
|
25f25a3a8f03eb007231639ad6dfdc2295ad7cf2
|
899a1a843af4429acd5f5e686028c97448c79962
|
refs/heads/main
| 2023-01-13T20:50:45.582647
| 2020-11-14T07:08:01
| 2020-11-14T07:08:01
| 312,750,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
# a116_buggy_image.py
import turtle as trtl
# instead of a descriptive name for the turtle such as painter,
# a less descriptive variable name Spider is used
Spider = trtl.Turtle()
# The pensize makes a circle which is the spider body.
Spider.pensize(40)
Spider.circle(20)
Spider.goto(20,20)
# This sets how many legs will be drawn.
Leg = 8
y1 = 70
y2 = y1
leglength = 360 / Leg
print("leglength=", leglength)
Spider.pensize(5)
up = 0
# Tests for how many legs are given and draws however many amounts it is.
while (up < Leg):
Spider.goto(0,20)
Spider.setheading(leglength*up - 45)
Spider.forward(y1)
print(Leg < y1)
up = up + 1
Spider.hideturtle()
# Eyes
Spider.penup()
Spider.goto(20,20)
Spider.pendown()
Spider.color("Red")
Spider.circle(0.1)
Spider.penup()
Spider.goto(-20,20)
Spider.pendown()
Spider.circle(0.1)
wn = trtl.Screen()
wn.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
36f4077e738bcd0ad9b6fb9a000f343d5aa57385
|
fe931e4eb729e010add72732889cfcfa0e82663d
|
/test/ParametricNLP_test.py
|
7e32cc47eea227c73a524210f0191ac4c1677cb2
|
[] |
no_license
|
Duam/python-master-thesis-code
|
14d7ae68d94475cc978717dbb2a9df691b5246e5
|
bb1a800612a1f046d2184ae42e00ed5ec0425b06
|
refs/heads/master
| 2023-06-30T07:02:28.369372
| 2021-08-06T15:11:26
| 2021-08-06T15:11:26
| 385,187,111
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
import casadi as cas
from thesis_code.utils.ParametricNLP import ParametricNLP
# Create a test optimization problem
nlp = ParametricNLP(name='test_problem', verbose=True)
# Add decision variables and parameters to the problem
nlp.add_decision_var('x', (3,1))
nlp.add_parameter('lbx', (3,1))
nlp.add_parameter('ubx', (3,1))
nlp.bake_variables()
# Fetch symbolics
x_sym = nlp.get_decision_var('x')
lbx_sym = nlp.get_parameter('lbx')
ubx_sym = nlp.get_parameter('ubx')
print(nlp.struct_w['x'])
# Create a cost function
nlp.set_cost(cas.mtimes([x_sym.T, x_sym]))
# Create an inequality constraint
nlp.add_inequality('x_iq_lbx', x_sym - lbx_sym)
nlp.add_inequality('x_iq_ubx', ubx_sym - x_sym)
# Set the parameters and initial guess
lbx_scalar = 0.5
ubx_scalar = 1.0
params = nlp.struct_p(0)
params['lbx'] = lbx_scalar * cas.DM.ones((3,1))
params['ubx'] = ubx_scalar * cas.DM.ones((3,1))
winit = nlp.struct_w(0)
winit['x'] = cas.DM.zeros((3,1))
# Some options
opts = {}
opts['ipopt.print_info_string'] = 'yes'
opts['ipopt.print_level'] = 3
opts['ipopt.max_iter'] = 1000
# Solve the problem..
# via ipopt
nlp.init(nlpsolver='ipopt')
res_ipopt, stats_ipopt, dum,dum,dum = nlp.solve(winit=winit, param=params)
# via sqpmethod
nlp.init(nlpsolver='sqpmethod')
res_sqp, stats_sqp, dum,dum,dum = nlp.solve(winit=winit, param=params)
# via qpoases
nlp.init(is_qp = True, nlpsolver='qpoases')
res_qp, stats_qp, dum,dum,dum = nlp.solve(winit=winit, param=params)
# Solve the problem
"""
print('x:', res['w']['x'])
print('lambda ubx:', res['lam_h']['x_iq_ubx'])
print('lambda lbx:', res['lam_h']['x_iq_lbx'])
print('Number of iterations:', stats['iter_count'])
"""
|
[
"paul.daum@posteo.de"
] |
paul.daum@posteo.de
|
9d10240ecd698eca008640c0eabba53a44e88d15
|
3d8a2d2124c484a7ac81835296c0a8834af8df6e
|
/one/commands/shell.py
|
1019bee8e283c55218e4903522be915893be2b46
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
DNXLabs/one-cli
|
e067a2a9d49c61494abcd9ba4b63626f656cdbb9
|
37265189ab184e6fa7569c201b181ba5d95a0a2a
|
refs/heads/master
| 2022-12-14T09:15:32.052749
| 2021-09-16T00:08:35
| 2021-09-16T00:08:35
| 253,417,937
| 7
| 1
|
Apache-2.0
| 2021-09-16T00:08:36
| 2020-04-06T06:51:25
|
Python
|
UTF-8
|
Python
| false
| false
| 914
|
py
|
import click
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
container = Container()
image = Image()
environment = EnvironmentAws()
SHELL_IMAGE = image.get_image('shell')
@click.command(help='Shell container with awscli and terraform pre-installed.')
@click.argument('args', nargs=-1)
@click.option('-i', '--image', default=SHELL_IMAGE, type=str, help='Docker image to use.')
@click.option('-p', '--port', default=(), type=str, help='Ports to expose from the container.', multiple=True)
def shell(args, image, port):
envs = environment.build().get_env()
command = ''
for arg in args:
command += '%s ' % (arg)
ports = list(port)
container.create(
image=image,
command=command,
ports=ports,
entrypoint='',
volumes=['.:/work'],
environment=envs
)
|
[
"arthurbdiniz@gmail.com"
] |
arthurbdiniz@gmail.com
|
c18140fcd9465540c29e5bf57783ab8ebbab2f3c
|
2cc5ad64d812b94508ac9d817d33072e633231ca
|
/Mission 2/Contest 2.2/Zhou-Xinming-3-2d.py
|
51351a85c5457f3e68127e8c5171bd1b30a5995f
|
[] |
no_license
|
xinmingzh/CS1010X
|
6943c06dcd53730e23b54e1fa3df12734cb7c39a
|
d3613899e82321d06544a7ad6e15d759aaebc988
|
refs/heads/master
| 2021-04-27T03:00:53.173961
| 2018-02-24T05:45:16
| 2018-02-24T05:45:16
| 120,771,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
#
# CS1010X --- Programming Methodology
#
# Mission 2 - 2D Contest
#
# Note that written answers are commented out to allow us to run your
# code easily while grading your problem set.
from runes import *
########
# Task #
########
# You may submit up to 3 entries. Please update your entry number below.
# Entry 3 of 3
# ============
# Write your function here. It should return a rune.
bb = overlay_frac(0, make_cross(make_cross(rcross_bb)), make_cross(rcross_bb))
show(overlay_frac(0, make_cross(turn_upside_down(corner_bb)), scale(0.8, bb)))
|
[
"xinming.zh@gmail.com"
] |
xinming.zh@gmail.com
|
1c02be31ccbc204a6af0d465965f54c16f3cf3bf
|
fd2e10285787a4001665aae69c3c28e09a84ee75
|
/breakout.py
|
46687816234c78a6b1667f801c73be2fbd37b025
|
[] |
no_license
|
laflechejonathan/breakout
|
967c60d2141bfd15730baaf1308f56d59cefa267
|
be72348db2e1450cfb0c83325e4001d09c674bdd
|
refs/heads/master
| 2021-01-18T20:24:01.702072
| 2017-04-05T04:21:45
| 2017-04-05T04:21:45
| 86,960,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,991
|
py
|
import random
import pygame
import math
import geometry
import constants as const
class BrickGrid:
def __init__(self):
empty_horizontal_space = const.SCREEN_WIDTH - (2 + const.NUM_COLUMNS) * const.BRICK_WIDTH
brick_horizontal_space = empty_horizontal_space / const.NUM_COLUMNS
empty_vertical_space = const.SCREEN_HEIGHT / 2 - const.NUM_ROWS * const.BRICK_WIDTH
brick_vertical_space = empty_vertical_space / const.NUM_ROWS
self.bullet_set = set()
self.brick_set = set()
for i in range(const.NUM_COLUMNS):
for j in range(const.NUM_ROWS):
brick_x = (i + 1) * (brick_horizontal_space + const.BRICK_WIDTH)
brick_y = (j + 1) * (brick_vertical_space + const.BRICK_HEIGHT)
rect = geometry.Rect(brick_x, brick_y, const.BRICK_WIDTH, const.BRICK_HEIGHT)
self.brick_set.add(rect)
self.original_brick_count = len(self.brick_set)
def get_num_cleared(self):
return self.original_brick_count - len(self.brick_set)
def reset(self):
self.bullet_set = set()
def render(self, screen):
for b in self.brick_set:
pygame.draw.rect(screen, const.GREEN, [b.x, b.y, b.width, b.height])
for b in self.bullet_set:
pygame.draw.rect(screen, const.GREY, [b.x, b.y, b.width, b.height])
def interact(self):
if random.uniform(0.0, 1.0) < const.PERCENT_BULLET:
candidates = [
b for b in self.brick_set if
not any([other.x == b.x and other.y > b.y for other in self.brick_set])
]
fires_bullet = random.choice(candidates)
bullet_x = fires_bullet.x + (fires_bullet.width + const.BULLET_WIDTH) / 2 # use the firing brick, not the stale loop variable `b`
bullet_y = fires_bullet.y + fires_bullet.height
self.bullet_set.add(geometry.Rect(bullet_x, bullet_y, const.BULLET_WIDTH, const.BULLET_HEIGHT))
remove_set = set()
for b in self.bullet_set:
b.y += const.BULLET_SPEED
if b.y >= const.SCREEN_HEIGHT:
remove_set.add(b)
self.bullet_set -= remove_set
return True
class Paddle:
def __init__(self):
x = const.SCREEN_WIDTH / 2
y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING
self.rect = geometry.Rect(x, y, const.PADDLE_WIDTH, const.PADDLE_HEIGHT)
self.speed = const.PADDLE_SPEED
def interact(self):
if pygame.key.get_pressed()[pygame.K_LEFT] != 0:
self.rect.x -= self.speed
if pygame.key.get_pressed()[pygame.K_RIGHT] != 0:
self.rect.x += self.speed
return True
def render(self, screen):
pygame.draw.rect(screen, const.RED, [self.rect.x, self.rect.y, self.rect.width, self.rect.height])
def get_angle_for_x(self, x):
'''
TODO - currently all collisions are 45 degrees which is a bit boring
With this code, depending on point of contact, rotation angle will
vary between min and max angle
'''
x = float(x)
delta = x - self.rect.x
percentage_of_paddle = delta / self.rect.width
degree_range = const.PADDLE_MAX_ANGLE - const.PADDLE_MIN_ANGLE
angle = degree_range * percentage_of_paddle + const.PADDLE_MIN_ANGLE
print 'For x={} in range {}/{}, got angle={}'.format(x, self.rect.x, self.rect.max_x, angle)
return angle
class Ball:
def __init__(self):
self.radius = const.BALL_RADIUS
self.speed = const.BALL_SPEED
self.x = random.randint(0, const.SCREEN_WIDTH)
self.y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING - 4 * const.BALL_RADIUS
self.heading = (random.choice([-0.5, 0.5]), -0.5)
self.min_x = 0 + const.BALL_RADIUS
self.max_x = const.SCREEN_WIDTH - const.BALL_RADIUS
self.min_y = 0 + const.BALL_RADIUS
self.max_y = const.SCREEN_HEIGHT
self.paddle_y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING - const.BALL_RADIUS
def line_of_movement(self):
current = self.x, self.y
prev = self.x - int(const.BALL_SPEED * self.heading[0]), self.y - int(const.BALL_SPEED * self.heading[1])
return (prev, current)
def interact(self):
self.x += int(const.BALL_SPEED * self.heading[0])
self.y += int(const.BALL_SPEED * self.heading[1])
if self.y >= self.max_y:
return False
return True
def rotate(self, angle):
theta = float(angle) * math.pi / 180.0
x, y = self.heading
new_x = x * math.cos(theta) - y * math.sin(theta)
new_y = x * math.sin(theta) + y * math.cos(theta)
self.heading = (new_x, new_y)
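# Worked example of the rotation above: for heading (1.0, 0.0) and
# angle = 90, new_x = cos(90deg) = 0.0 and new_y = sin(90deg) = 1.0
# (up to floating-point error), i.e. a quarter-turn of the heading vector.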
def render(self, screen):
pygame.draw.circle(screen, const.BLUE, (self.x, self.y), self.radius)
def collision_check(ball, paddle, brick_grid):
if ball.x <= ball.min_x or ball.x >= ball.max_x:
ball.heading = (-ball.heading[0], ball.heading[1])
if ball.y <= ball.min_y:
ball.heading = (ball.heading[0], -ball.heading[1])
if paddle.rect.intersect(ball.line_of_movement(), const.BALL_RADIUS) != geometry.Intersection.NONE:
ball.heading = (ball.heading[0], -ball.heading[1])
ball.interact()
for brick in brick_grid.brick_set:
intersection = brick.intersect(ball.line_of_movement(), const.BALL_RADIUS)
if intersection == geometry.Intersection.HORIZONTAL:
ball.heading = (ball.heading[0], -ball.heading[1])
elif intersection == geometry.Intersection.VERTICAL:
ball.heading = (-ball.heading[0], ball.heading[1])
if intersection != geometry.Intersection.NONE:
brick_grid.brick_set.remove(brick)
break
for bullet in brick_grid.bullet_set:
if bullet.y >= paddle.rect.y and bullet.x >= paddle.rect.x and bullet.x <= paddle.rect.max_x:
return False
return True
|
[
"jlafleche@zenefits.com"
] |
jlafleche@zenefits.com
|
1faba7758fe0c892013372af65d23ba65c04f8b8
|
f8f841bba1f3c05c69f3a43297b9d63016f95e2a
|
/format_w2v_file.py
|
f8577089ee49841b33990da82b63543e3fcb28ef
|
[] |
no_license
|
valerie94/russian_nballs
|
2246d764d16f3b0374193bb0ec14c49ad72df544
|
c960de4a7bba40dcd3a7c6e723b1bbea272969f8
|
refs/heads/master
| 2020-05-18T09:13:10.098864
| 2019-06-04T20:31:29
| 2019-06-04T20:31:29
| 184,317,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,991
|
py
|
'''This .py program converts the initial file with word2vec features to a standard format such as
word_1 feature1 feature2 ... feature256 \n '''
'''This script creates the file ru_w2v.txt, the w2v file required for constructing nballs '''
import re
def change_format(file_name): # input is the initial file
line_array = []
initial_file = open(file_name)
for line in initial_file:
#replace all special symbols and split by separator
if ("[" in line):
line = line.replace('\t', ' ')
line = line.replace('\n', '')
line = re.sub('\t', ' ', line)
line = line.replace('[', '')
line = line.split(" ")
line.pop(0)
line = list(filter(None, line))
for x in line:
line_array.append(x)
line_array.append(' ')
elif ("]" in line):
line = line.replace('\t', ' ')
line = line.replace(']', '')
line = line.split(" ")
line = list(filter(None, line))
for x in line:
line_array.append(x)
if ("\n" not in x):
line_array.append(' ')
else:
line = line.replace('\n', '')
line = line.split(" ")
line = list(filter(None, line))
for x in line:
line_array.append(x)
line_array.append(" ")
initial_file.close()
return line_array
def write_to_output_file(lines, file_name): #write to the output file
with open(file_name, "w") as file:
file.write("".join(lines))
file.close()
if __name__ == "__main__":
w2v_file = "ru.tsv" # initial source file from https://github.com/Kyubyong/wordvectors, put this file in the project directory or specify the path
output_w2v_file = "ru_w2v.txt" #the name of output file with w2v features
formatted_line = change_format(w2v_file)
write_to_output_file(formatted_line, output_w2v_file)
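# Expected output line format in ru_w2v.txt (values illustrative, not taken
# from the real dataset): each line is "word feat_1 ... feat_256", for example
#   word_1 0.013 -0.461 ... 0.200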
|
[
"noreply@github.com"
] |
noreply@github.com
|
f4f46508d1a0f02512ff3ef04f883f5f7004be63
|
1bc2a635a93b5bc84606edf9ac2226851cac9e6d
|
/tests/unit/test_business.py
|
99dba73500a5ba0dccb4d31c5d763654cfe9ff9d
|
[
"MIT"
] |
permissive
|
coolkat64/rolling
|
819149cbb1e11a455b93a030477f9da91e2f93e4
|
4c3ee2401128e993a52ac9b52cdbd32e17728129
|
refs/heads/master
| 2022-11-29T00:35:14.058665
| 2020-07-31T20:37:15
| 2020-07-31T20:37:15
| 285,312,272
| 0
| 0
|
MIT
| 2020-08-05T14:25:48
| 2020-08-05T14:25:47
| null |
UTF-8
|
Python
| false
| false
| 37,466
|
py
|
# coding: utf-8
import typing
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
import pytest
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.server.controller.business import ALL_OF_THEM
from rolling.server.controller.business import ONE_OF_THEM
from rolling.server.document.business import OfferDocument
from rolling.server.document.business import OfferItemDocument
from rolling.server.document.business import OfferItemPosition
from rolling.server.document.business import OfferOperand
from rolling.server.document.business import OfferStatus
from rolling.server.document.universe import UniverseStateDocument
from tests.fixtures import create_stuff
from tests.fixtures import description_serializer
EXPECTED_PLASTIC_BOTTLE_NAME = "Plastic bottle (1)"
EXPECTED_PLASTIC_BOTTLE_NAME_ = "(!) Plastic bottle (1)"
def _add_items(kernel: Kernel, offer_id: int) -> None:
kernel.server_db_session.add(
OfferItemDocument(
offer_id=offer_id,
position=OfferItemPosition.REQUEST.value,
resource_id="RED_WINE",
quantity=1.5,
)
)
kernel.server_db_session.add(
OfferItemDocument(
offer_id=offer_id,
position=OfferItemPosition.REQUEST.value,
stuff_id="STONE_HAXE",
quantity=1,
)
)
kernel.server_db_session.add(
OfferItemDocument(
offer_id=offer_id,
position=OfferItemPosition.OFFER.value,
resource_id="WOOD",
quantity=0.5,
)
)
kernel.server_db_session.add(
OfferItemDocument(
offer_id=offer_id,
position=OfferItemPosition.OFFER.value,
stuff_id="LEATHER_JACKET",
quantity=1,
)
)
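# For reference (summary added, derived from the items above): each offer built by
# _add_items requests 1.5 RED_WINE and 1 STONE_HAXE, and offers 0.5 WOOD and
# 1 LEATHER_JACKET; the two fixtures below differ only in their OR/AND operands.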
@pytest.fixture
def xena_permanent_or_offer(worldmapc_xena_model: CharacterModel, worldmapc_kernel: Kernel):
offer_doc = OfferDocument(
character_id=worldmapc_xena_model.id,
title="OfferTitle",
request_operand=OfferOperand.OR.value,
offer_operand=OfferOperand.OR.value,
permanent=True,
status=OfferStatus.OPEN.value,
)
worldmapc_kernel.server_db_session.add(offer_doc)
worldmapc_kernel.server_db_session.commit()
_add_items(worldmapc_kernel, offer_doc.id)
worldmapc_kernel.server_db_session.commit()
return offer_doc
@pytest.fixture
def xena_permanent_and_offer(worldmapc_xena_model: CharacterModel, worldmapc_kernel: Kernel):
offer_doc = OfferDocument(
character_id=worldmapc_xena_model.id,
title="OfferTitle",
request_operand=OfferOperand.AND.value,
offer_operand=OfferOperand.AND.value,
permanent=True,
status=OfferStatus.OPEN.value,
)
worldmapc_kernel.server_db_session.add(offer_doc)
worldmapc_kernel.server_db_session.commit()
_add_items(worldmapc_kernel, offer_doc.id)
worldmapc_kernel.server_db_session.commit()
return offer_doc
class TestBusiness:
async def _assert_owned_offers(
self,
kernel: Kernel,
web: TestClient,
character: CharacterModel,
count: int,
names: typing.Optional[typing.List[str]] = None,
) -> None:
names = names or []
# main page
resp: ClientResponse = await web.post(f"/business/{character.id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
assert f"Voir les offres que vous proposez ({count} en cours)" in item_labels
if not names:
return
# offers page
resp: ClientResponse = await web.post(f"/business/{character.id}/offers")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
for name in names:
assert next(l for l in item_labels if name in str(l))
async def _assert_edit_offer(
self,
kernel: Kernel,
web: TestClient,
character: CharacterModel,
offer_id: int,
request_operand_str: str = ONE_OF_THEM,
request_item_names: typing.Optional[typing.List[str]] = None,
request_item_names_not: typing.Optional[typing.List[str]] = None,
offer_operand_str: str = ONE_OF_THEM,
offer_item_names: typing.Optional[typing.List[str]] = None,
offer_item_names_not: typing.Optional[typing.List[str]] = None,
open_: bool = False,
) -> None:
request_item_names = request_item_names or []
request_item_names_not = request_item_names_not or []
offer_item_names = offer_item_names or []
offer_item_names_not = offer_item_names_not or []
resp = await web.post(f"/business/{character.id}/offers/{offer_id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
form_item_by_name = {i.name: i for i in descr.items[0].items}
form_item_labels = [i.label for i in descr.items[0].items]
assert form_item_by_name["request_operand"].value == request_operand_str
assert form_item_by_name["offer_operand"].value == offer_operand_str
for request_item_name in request_item_names:
assert request_item_name in form_item_labels
for offer_item_name in offer_item_names:
assert offer_item_name in form_item_labels
for request_item_name_not in request_item_names_not:
assert request_item_name_not not in form_item_labels
for offer_item_name_not in offer_item_names_not:
assert offer_item_name_not not in form_item_labels
if not open_:
assert "Activer" == descr.items[1].label
else:
assert "Désactiver" == descr.items[1].label
async def _assert_read_offer(
self,
kernel: Kernel,
web: TestClient,
owner: CharacterModel,
character: CharacterModel,
offer_id: int,
request_operand_str: str = ONE_OF_THEM,
have_not_item_names: typing.Optional[typing.List[str]] = None,
have_item_names: typing.Optional[typing.List[str]] = None,
offer_operand_str: str = ONE_OF_THEM,
offer_item_names: typing.Optional[typing.List[str]] = None,
owner_can_make_deal: bool = True,
can_make_deal: bool = False,
) -> None:
have_not_item_names = have_not_item_names or []
have_item_names = have_item_names or []
offer_item_names = offer_item_names or []
resp = await web.post(f"/business/{character.id}/see-offer/{owner.id}/{offer_id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
form_item_labels = [i.label or i.text for i in descr.items]
assert f"Eléments demandé(s) ({request_operand_str})" in form_item_labels
assert f"Eléments donné(s) ({offer_operand_str})" in form_item_labels
for have_not_item_name in have_not_item_names:
assert f"(X) {have_not_item_name}" in form_item_labels
for have_item_name in have_item_names:
assert f"(V) {have_item_name}" in form_item_labels
for offer_item_name in offer_item_names:
assert offer_item_name in form_item_labels
if owner_can_make_deal:
if can_make_deal:
assert "Effectuer une transaction" in form_item_labels
else:
assert "Vous ne possédez pas de quoi faire un marché" in form_item_labels
else:
assert f"{owner.name} ne peut pas assurer cette opération"
async def test_create_offer__nominal_case(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_owned_offers(kernel, web, xena, count=0)
resp = await web.post(f"/business/{xena.id}/offers-create?permanent=1")
assert 200 == resp.status
resp = await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
assert descr.redirect == f"/business/{xena.id}/offers/1"
await self._assert_owned_offers(kernel, web, xena, count=1, names=["My offer"])
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
request_operand_str=ONE_OF_THEM,
request_item_names=[],
offer_operand_str=ONE_OF_THEM,
offer_item_names=[],
open_=False,
)
async def test_create_offer__change_operands(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if this assertion fails
assert (
await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
).status == 200
assert (
await web.post(
f"/business/{xena.id}/offers/{1}",
json={"request_operand": ALL_OF_THEM, "offer_operand": ALL_OF_THEM},
)
).status == 200
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
)
async def test_create_offer__open_close(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if this assertion fails
assert (
await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
).status == 200
assert (await web.post(f"/business/{xena.id}/offers/{1}?open=1")).status == 200
await self._assert_edit_offer(kernel, web, xena, offer_id=1, open_=True)
await self._assert_owned_offers(kernel, web, xena, count=1, names=["(V) My offer"])
assert (await web.post(f"/business/{xena.id}/offers/{1}?close=1")).status == 200
await self._assert_edit_offer(kernel, web, xena, offer_id=1, open_=False)
await self._assert_owned_offers(kernel, web, xena, count=1, names=["(X) My offer"])
async def test_add_items__check_form(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
assert (
await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
).status == 200
resp = await web.post(f"/business/{xena.id}/offers/{1}/add-item?position=REQUEST")
assert resp.status == 200
descr = description_serializer.load(await resp.json())
assert descr.items[0].is_form
assert descr.items[0].items[0].name == "value"
for name in [
"Bois (mètre cubes)",
"Vin rouge (litres)",
"Plastic bottle (unité)",
"Bouclier de bois (unité)",
"Hache de pierre (unité)",
"Veste de cuir (unité)",
"Pierre (unités)",
"Corps (unité)",
"Petit bois (mètre cubes)",
]:
assert name in descr.items[0].items[0].choices
assert descr.items[0].items[1].name == "quantity"
async def test_update_offer__have_some_required__request_and(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if this assertion fails
assert (
await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
).status == 200
# Add one stuff
assert (
await web.post(
f"/business/{xena.id}/offers/{1}/add-item"
f"?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
)
).status == 200
await self._assert_edit_offer(
kernel, web, xena, offer_id=1, request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME]
)
# Add one resource
assert (
await web.post(
f"/business/{xena.id}/offers/{1}/add-item"
f"?position=REQUEST&value=Petit bois (mètre cubes)&quantity=1.50"
)
).status == 200
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME, "Petit bois (1.5 mètre cubes)"],
)
async def test_update_offer__have_some_required__remove_item(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if this assertion fails
assert (
await web.post(
f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
)
).status == 200
# Add one stuff
assert (
await web.post(
f"/business/{xena.id}/offers/{1}/add-item?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
)
).status == 200
await self._assert_edit_offer(
kernel, web, xena, offer_id=1, request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME]
)
# remove it
assert (await web.post(f"/business/{xena.id}/offers/{1}/remove-item/{1}")).status == 200
await self._assert_edit_offer(
kernel, web, xena, offer_id=1, request_item_names_not=[EXPECTED_PLASTIC_BOTTLE_NAME]
)
async def test_edit_offer__test_owner_have_display(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_and_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
kernel = worldmapc_kernel
web = worldmapc_web_app
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
offer_item_names=["(X) Bois (0.5 mètre cubes)", "(X) Veste de cuir (1)"],
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
open_=True,
)
        # add the wood resource to the offer owner
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
offer_item_names=["Bois (0.5 mètre cubes)", "(X) Veste de cuir (1)"],
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
open_=True,
)
        # add the leather jacket to the offer owner
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
await self._assert_edit_offer(
kernel,
web,
xena,
offer_id=1,
request_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
open_=True,
)
async def test_read_offer__have_some_required_items__and(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_and_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_and_offer
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
have_not_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
offer_item_names=["(!) Bois (0.5 mètre cubes)", "(!) Veste de cuir (1)"],
owner_can_make_deal=False,
)
kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
have_not_item_names=["Hache de pierre (1)"],
have_item_names=["Vin rouge (1.5 litres)"],
owner_can_make_deal=False,
)
haxe = create_stuff(kernel, "STONE_HAXE")
kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
have_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
owner_can_make_deal=False,
)
        # add wood to the offer owner (clears the (!) marker)
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
offer_item_names=["Bois (0.5 mètre cubes)", "(!) Veste de cuir (1)"],
owner_can_make_deal=False,
)
        # add the jacket to the offer owner (clears the (!) marker)
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ALL_OF_THEM,
offer_operand_str=ALL_OF_THEM,
offer_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
owner_can_make_deal=True,
can_make_deal=True,
)
async def test_read_offer__have_some_required_items__or(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_or_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_or_offer
        # ensure xena has all offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
have_not_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
offer_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
can_make_deal=False,
)
kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
have_not_item_names=["Hache de pierre (1)"],
have_item_names=["Vin rouge (1.5 litres)"],
can_make_deal=True,
)
haxe = create_stuff(kernel, "STONE_HAXE")
kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
await self._assert_read_offer(
kernel,
web,
xena,
arthur,
offer_id=offer.id,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
have_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
can_make_deal=True,
)
async def test_read_offer__make_transaction__missing_request_and(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_and_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_and_offer
        # ensure xena has all offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        # give arthur only part of what is required
kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
assert "Vous ne possédez pas ce qu'il faut pour faire ce marché" in item_labels
async def test_read_offer__make_transaction__owner_missing_offer_and(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_and_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_and_offer
        # xena has only part of the offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
assert f"{xena.name} ne peut pas assurer cette opération" in item_labels
async def test_read_offer__make_transaction__request_and(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_and_offer: OfferDocument,
initial_universe_state: UniverseStateDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_and_offer
        # give arthur everything required
kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
haxe = create_stuff(kernel, "STONE_HAXE")
kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
        # ensure xena has all offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
assert kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
assert kernel.stuff_lib.have_stuff_count(xena.id, "LEATHER_JACKET")
assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
assert not kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
assert not kernel.stuff_lib.have_stuff_count(arthur.id, "LEATHER_JACKET")
assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
assert "Je confirme vouloir faire ce marché" in item_labels
# Do the deal
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal?confirm=1"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
assert "Marché effectué" in item_labels
assert not kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
assert not kernel.stuff_lib.have_stuff_count(xena.id, "LEATHER_JACKET")
assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
assert kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
assert kernel.stuff_lib.have_stuff_count(arthur.id, "LEATHER_JACKET")
assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
async def test_read_offer__make_transaction__missing_all_request_or(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_or_offer: OfferDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_or_offer
        # ensure xena has all offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
assert "Vous ne possédez pas ce qu'il faut pour faire ce marché" in item_labels
async def test_read_offer__make_transaction__request_or(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
xena_permanent_or_offer: OfferDocument,
initial_universe_state: UniverseStateDocument,
) -> None:
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
offer = xena_permanent_or_offer
        # ensure xena has one of the offered items
kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        # give arthur everything required
kernel.resource_lib.add_resource_to("RED_WINE", 1.5, character_id=arthur.id)
haxe = create_stuff(kernel, "STONE_HAXE")
kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
assert kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
assert not kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
resp = await web.post(
f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
item_by_label = {i.label: i for i in descr.items}
give_wine_str = "Faire ce marché et donner Vin rouge (1.5 litres)"
assert give_wine_str in item_labels
assert "Faire ce marché et donner Hache de pierre (1)" in item_labels
give_wine_url = item_by_label[give_wine_str].form_action
resp = await web.post(give_wine_url)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
item_by_label = {i.label: i for i in descr.items}
take_wood_str = "Faire ce marché et obtenir Bois (0.5 mètre cubes)"
assert take_wood_str in item_labels
assert "Faire ce marché et obtenir Veste de cuir (1)" not in item_labels
        # give the jacket to xena so that it can be taken
jacket = create_stuff(kernel, "LEATHER_JACKET")
kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
resp = await web.post(give_wine_url)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
item_by_label = {i.label: i for i in descr.items}
take_wood_str = "Faire ce marché et obtenir Bois (0.5 mètre cubes)"
assert take_wood_str in item_labels
assert "Faire ce marché et obtenir Veste de cuir (1)" in item_labels
take_wood_url = item_by_label[take_wood_str].form_action
resp = await web.post(take_wood_url)
assert 200 == resp.status
assert not kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
assert kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
async def test_create_with_character_transaction(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_web_app: TestClient,
worldmapc_kernel: Kernel,
initial_universe_state: UniverseStateDocument,
) -> None:
"""+ conteur main page + vue depuis target + blinker"""
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
kernel = worldmapc_kernel
web = worldmapc_web_app
assert (
await web.post(
f"/business/{xena.id}/offers-create?with_character_id={arthur.id}",
json={"title": "My offer"},
)
).status == 200
assert (
await web.post(
f"/business/{xena.id}/offers/{1}/add-item"
f"?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
)
).status == 200
assert (
await web.post(
f"/business/{xena.id}/offers/{1}/add-item"
f"?position=OFFER&value=Vin rouge (litres)&quantity=1.5"
)
).status == 200
assert (await web.post(f"/business/{xena.id}/offers/{1}?open=1")).status == 200
await self._assert_edit_offer(
kernel,
web,
character=xena,
offer_id=1,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
request_item_names=["Plastic bottle (1)"],
offer_item_names=["(X) Vin rouge (1.5 litres)"],
open_=True,
)
await self._assert_read_offer(
kernel,
web,
owner=xena,
character=arthur,
offer_id=1,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
have_not_item_names=["Plastic bottle (1)"],
offer_item_names=["(!) Vin rouge (1.5 litres)"],
can_make_deal=False,
)
        # give each side what it needs
kernel.resource_lib.add_resource_to("RED_WINE", 1.5, character_id=xena.id)
bottle = create_stuff(kernel, "PLASTIC_BOTTLE_1L")
kernel.stuff_lib.set_carried_by(bottle.id, character_id=arthur.id)
assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(xena.id, "PLASTIC_BOTTLE_1L")
assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(arthur.id, "PLASTIC_BOTTLE_1L")
await self._assert_read_offer(
kernel,
web,
owner=xena,
character=arthur,
offer_id=1,
request_operand_str=ONE_OF_THEM,
offer_operand_str=ONE_OF_THEM,
have_item_names=["Plastic bottle (1)"],
offer_item_names=["Vin rouge (1.5 litres)"],
can_make_deal=True,
)
# xena main page
resp: ClientResponse = await web.post(f"/business/{xena.id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
assert "Voir les transactions en attente (1 en cours)" in item_labels
# arthur main page
resp: ClientResponse = await web.post(f"/business/{arthur.id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
assert "*Voir les transactions en attente (1 en cours)" in item_labels
resp = await web.post(f"/business/{arthur.id}/see-offer/{xena.id}/{1}/deal")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
item_by_label = {i.label: i for i in descr.items}
deal_str = "Faire ce marché et donner Plastic bottle (1)"
assert deal_str in item_labels
go_url = item_by_label[deal_str].form_action
resp = await web.post(go_url)
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label or i.text for i in descr.items]
item_by_label = {i.label: i for i in descr.items}
deal_str = "Faire ce marché et obtenir Vin rouge (1.5 litres)"
assert deal_str in item_labels
go_url = item_by_label[deal_str].form_action
assert (await web.post(go_url)).status == 200
assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
assert kernel.stuff_lib.have_stuff_count(xena.id, "PLASTIC_BOTTLE_1L")
assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
assert not kernel.stuff_lib.have_stuff_count(arthur.id, "PLASTIC_BOTTLE_1L")
# xena main page
resp: ClientResponse = await web.post(f"/business/{xena.id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
assert "Voir les transactions en attente (0 en cours)" in item_labels
# arthur main page
resp: ClientResponse = await web.post(f"/business/{arthur.id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
item_labels = [i.label for i in descr.items]
assert "Voir les transactions en attente (0 en cours)" in item_labels
|
[
"sevajol.bastien@gmail.com"
] |
sevajol.bastien@gmail.com
|
1baef92bd7d86cf90bd97918be9c5310d34eab69
|
6ca2b1b16fce00730bc47a5e6d724aad7698a1f4
|
/settings.py
|
6b962dbdae1cfd07d515a8220fc6f1cfbe6390ea
|
[] |
no_license
|
eddy-suiyun/02_Alien-invasion
|
72ef3801fb1c1f88743e92de75cf52ea94186b0a
|
4ba470369f033d788a890a739d0d608842eb8035
|
refs/heads/master
| 2021-05-25T17:17:26.037632
| 2020-04-12T13:43:17
| 2020-04-12T13:43:17
| 253,839,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
class Settings(object):
    """A class that stores all initial settings for Alien Invasion."""
    def __init__(self):
        """Initialize the game's settings."""
        # Screen settings
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230,230,230)
        # Ship settings
        self.ship_speed_factor = 1.5
        # Bullet settings
        self.bullet_speed_factor = 1
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = 250,250,250
        self.bullets_allowed = 6
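# Minimal usage sketch (added; assumes pygame and the surrounding Alien Invasion
# project, which are not shown in this file):
if __name__ == '__main__':
    import pygame
    settings = Settings()
    pygame.init()
    screen = pygame.display.set_mode((settings.screen_width, settings.screen_height))
    screen.fill(settings.bg_color)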
|
[
"125443673@qq.com"
] |
125443673@qq.com
|
81d48422f1d924c39d76dcbf258ebdb977195149
|
af3840c306fa5eeb2f0d9e5ed779a582d3c7d5d7
|
/구구단 11-16.py
|
e45c083721e6ad8260dc97b620622219cfb596a2
|
[] |
no_license
|
HanseamChung/prac_python
|
8c6053402a456eb3c78941b0917fa1570c56bbea
|
d8fc32989ae9707261deb602bea8853d7f7a0aed
|
refs/heads/master
| 2021-05-05T03:12:16.940521
| 2018-02-09T08:37:10
| 2018-02-09T08:37:10
| 119,792,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
# print the times tables (구구단) for 11 through 16
for i in range(11, 17):
    print('\n')
    for k in range(1, 10):
        print(str(i*k) + " ", end="")
|
[
"noreply@github.com"
] |
noreply@github.com
|
8658832a7dcc4eabcb30779bae2a6408403a2d6a
|
3f5e290adefe73d55c4170029629c9f4794920f1
|
/rewrite_verify.py
|
6362a48d1f9e92fa3daea97e8de31e2f69f66ab9
|
[] |
no_license
|
fengmu/mana1
|
d318b36294ab17f6ee8d4b4536a5ddad94e0dce5
|
7d64c33580e2fdc2cbd81e17124dec44d1d8858a
|
refs/heads/master
| 2020-12-24T13:28:28.937308
| 2012-12-11T09:30:43
| 2012-12-11T09:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
# -*- coding: utf-8 -*-
'''
#=============================================================================
# FileName: rewrite_verify.py
# Desc:
# Author: solomon
# Email: 253376634@qq.com
# HomePage:
# Version: 0.0.1
# LastChange: 2012-10-28 18:31:19
# History:
#=============================================================================
'''
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
import confsql, datetime, memcache
from django.utils import simplejson
import writemc  # needed below by writemc.Writemc(...)
import functions
from mylog import log
def rewrite_verify(request): # rewrite handling for the review/verification step
    #os.system("cmd /c D:/django/mysite/mana1/writemc.py")
    myjson = simplejson.loads(request.POST["myjson"])
    rs1 = functions.trim_csv(myjson["table"], 7)
    s = "" # holds the quoted list of selected store codes
    for rs in rs1:
        if rs[0] == "1": # store was checked
            s += "'" + rs[1] + "',"
    s = s[0:-1]
    #os.system('cmd.exe /c c:/ZSW/memcached/memcached.exe -m 256 -p 11211') # start the memcached service
    html = ""
    if s != "":
        writemc.Writemc(sqlstr=s)
        result = confsql.runquery(u"select * from brainfo where 门店代码 in(" + s + ")") # fetch aggregate store info, used to decide whether a rewrite is needed
        html = "<tr><th>重算</th><th>门店代码</th><th>门店名称</th><th>品项数</th><th>库存数量</th><th>建议订货总量</th><th>建议订货总额</th></tr>"
        for rs in result:
            html += "<tr><td><input type='checkbox'></td><td>"+str(rs[0])+"</td><td>"+str(rs[1].encode("utf8"))+"</td><td>"+str(rs[2])+"</td><td>"+str(rs[3])+"</td><td>"+str(rs[4])+"</td><td>"+str(rs[5])+"</td></tr>"
        return HttpResponse(html)
    else:
        writemc.Writemc() # rewrite all stores
        result = confsql.runquery("select * from brainfo") # fetch aggregate store info, used to decide whether a rewrite is needed
        html = "<tr><th>重算</th><th>门店代码</th><th>门店名称</th><th>品项数</th><th>库存数量</th><th>建议订货总量</th><th>建议订货总额</th></tr>"
        for rs in result:
            html += "<tr><td><input type='checkbox'></td><td>"+str(rs[0])+"</td><td>"+str(rs[1].encode("utf8"))+"</td><td>"+str(rs[2])+"</td><td>"+str(rs[3])+"</td><td>"+str(rs[4])+"</td><td>"+str(rs[5])+"</td></tr>"
        return HttpResponse(html)
|
[
"fengmu1225@qq.com"
] |
fengmu1225@qq.com
|
9eea9265e1ace539b7498d06a98811dc189c3578
|
13486ab3a7c9f4221b25236d11a44caadf9f90f0
|
/programming-three/three134.py
|
67abd3857686a1d4fc7033b00938fa1b80d3ce94
|
[] |
no_license
|
strawwhat/diary
|
f0ec0c86b32d3ba7bedba33c9c629ec41c9e9a7b
|
2d925b42e61c8aebbe6d212bceb3de7e10bd58c3
|
refs/heads/master
| 2021-01-22T22:34:09.645638
| 2017-10-02T07:44:41
| 2017-10-02T07:44:41
| 85,550,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
#!/usr/bin/python
# *-*coding:utf-8 *-*
"Example 3-9, page 134: redirect.py -- redirecting streams to Python objects"
"""
file-like objects that save standard output in a string and provide
standard input text a string ; redirect runs a passed-in function
with its output and input streams reset to these file-like class objects

In Python, any object whose methods mimic a file can act as a standard
stream; this depends not on the object's data type but on its interface
(sometimes called a protocol):
  any object that provides a file-like read method can be assigned to
  sys.stdin, so input is read from that object's read method;
  any object that defines a file-like write method can be assigned to
  sys.stdout, so all standard output is sent to that object's method.
"""
import sys
class Output:                          # simulates an output file
    def __init__(self):
        self.text = ''                 # start with an empty string
    def write(self, string):
        self.text += string            # append the string
    def writelines(self, lines):       # write every line in the list
        for line in lines: self.write(line)
class Input:                           # simulates an input file
    def __init__(self, input=''):      # default argument
        self.text = input
    def read(self, size=None):         # read size bytes, or everything
        if size == None:
            res, self.text = self.text, ''
        else:
            res, self.text = self.text[:size], self.text[size:]
        return res
    def readline(self):
        eoln = self.text.find('\n')    # offset of the next end-of-line
        if eoln == -1:                 # no eoln found: return the rest
            res, self.text = self.text, ''
        else:
            res, self.text = self.text[:eoln+1], self.text[eoln+1:]
        return res
def redirect(function, pargs, kargs, input):        # redirect stdin/stdout
    savestreams = sys.stdin, sys.stdout             # save the real streams
    sys.stdin = Input(input)                        # swap in the fake streams
    sys.stdout = Output()
    try:
        result = function(*pargs, **kargs)          # run the function with its arguments
        output = sys.stdout.text
    finally:
        sys.stdin, sys.stdout = savestreams         # restore the real streams even on exception
    return (result, output)                         # return result and captured output
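# Minimal usage sketch (added; not part of the book's example):
if __name__ == '__main__':
    def interact():
        print('Hello stream world')        # captured by the Output object
        name = sys.stdin.readline()        # served from the Input object
        return name.rstrip('\n')
    result, output = redirect(interact, (), {}, input='Eric\n')
    print(result)   # -> Eric
    print(output)   # -> Hello stream world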
|
[
"bnm1122@yeah.net"
] |
bnm1122@yeah.net
|
dde7d82754424f14d0b28a6142c13333535560f6
|
e3adbec6cd8d0b50880b3b606352a1c751d4ac79
|
/functions/singly_linked_list.py
|
7cadf3954044adea1f9fcd0cccd0b5268d96d8b1
|
[] |
no_license
|
ZiyaoGeng/LeetCode
|
3cc5b553df5eac2e5bbb3ccd0f0ed4229574fa2f
|
c4c60b289c0bd9d9f228d04abe948d6287e70ea8
|
refs/heads/master
| 2022-04-07T08:19:58.647408
| 2020-03-12T08:56:13
| 2020-03-12T08:56:13
| 218,981,503
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
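# Minimal usage sketch (added; not part of the original snippet): build the list
# 1 -> 2 -> 3 from a plain Python list and traverse it.
def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head

node = build_list([1, 2, 3])
while node:
    print(node.val)   # prints 1, then 2, then 3
    node = node.next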
|
[
"593947521@qq.com"
] |
593947521@qq.com
|
7534fdc5e9d0e271082d603c5c0a1ba2262d679e
|
873d858b79a51a6a14e74e1a6fe4cc97809a69bc
|
/rosserial_ws/devel/lib/rosserial_client/make_library.py
|
eed0f221f32c99f4c790655eeb0d5132d20cacf2
|
[] |
no_license
|
nichoteloo/ROS-Noetic-devel
|
cf3058014fc491f38a23426c136cb8fbdee7a397
|
81e7090c5dc0e548aed4aa57b9579e355e9bcd25
|
refs/heads/master
| 2023-05-07T19:21:03.804523
| 2021-06-02T21:13:48
| 2021-06-02T21:13:48
| 373,293,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/script.py.in
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
python_script = '/home/nichotelo/ros/rosserial_ws/src/rosserial/rosserial_client/src/rosserial_client/make_library.py'
with open(python_script, 'r') as fh:
context = {
'__builtins__': __builtins__,
'__doc__': None,
'__file__': python_script,
'__name__': __name__,
'__package__': None,
}
exec(compile(fh.read(), python_script, 'exec'), context)
|
[
"nicolaschristianto@mail.ugm.ac.id"
] |
nicolaschristianto@mail.ugm.ac.id
|
19947694ca5e83f139404b18e79c23211e055d99
|
74c776f1a9a059bbc530fe5d6b12165425d3f954
|
/run_cn.py
|
33e53e3cce1647da732faa01a74054d3bb265f5a
|
[
"MIT"
] |
permissive
|
Lyttonkeepfoing/SANER
|
0d2ea7df305e0599511bb349dc2b05af59b64425
|
ac30f441be615de5224411816935283eddfbe330
|
refs/heads/master
| 2023-08-25T05:23:01.412133
| 2021-10-19T14:58:11
| 2021-10-19T14:58:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
import os
# the number of similar words
num = 10
# dataset name
dataset = "WB"
seed = 14
attn_type = "dot"
fusion_type = "gate-concat"
# Path of bert model
bert_model = "data/bert-base-chinese"
# Path of the pre-trained word embeddings for getting similar words for each token
glove_path = "data/tencent_unigram.txt"
pool_method = "first"
# Path of the ZEN model
zen_model = "zen_base/"
log = "log/{}_zen_{}_{}.txt".format(dataset, pool_method, num)
os.system("python3 train_zen_cn.py --dataset {} "
"--seed {} --kv_attn_type {} --fusion_type {} --context_num {} "
"--bert_model {} --pool_method {} --glove_path {} "
"--zen_model {} "
"--lr 0.0001 --trans_dropout 0.2 --fc_dropout 0.4 --memory_dropout 0.2 "
"--fusion_dropout 0.2 --log {}".format(dataset, seed, attn_type, fusion_type,
num, bert_model, pool_method, glove_path, zen_model, log))
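# For reference (note added, derived from the values above): the generated command
# starts with "python3 train_zen_cn.py --dataset WB --seed 14 --kv_attn_type dot
# --fusion_type gate-concat --context_num 10 ..." and logs to log/WB_zen_first_10.txt.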
|
[
"nyy477@qq.com"
] |
nyy477@qq.com
|
07a8d38422d30557d6ed10da8a9d60d2ec141308
|
3c397042e7fa0d7d4fa25cd75f0d10babd9f933f
|
/lab_8/mysite/dishes/urls.py
|
5290d59685f5e3ad3fc3fb854489224b5e6a6100
|
[] |
no_license
|
StepanIonov/RIP_lab
|
f34f2a95fb8ddcfeeb703efd7088320f40ac1fc5
|
0fefaf77d664ed404d791422658a062fc3e9201c
|
refs/heads/master
| 2023-02-20T12:38:33.389360
| 2021-01-18T10:13:24
| 2021-01-18T10:13:24
| 295,768,234
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('<int:dish_id>/', views.detail, name='detail'),
path('', views.index, name='index'),
]
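# Resulting routes (note added; assumes this URLconf is included under a prefix
# such as "dishes/" by the project's root urls.py, which is not shown here):
#   dishes/      -> views.index                       (name='index')
#   dishes/7/    -> views.detail(request, dish_id=7)  (name='detail')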
|
[
"42943755+StepanIonov@users.noreply.github.com"
] |
42943755+StepanIonov@users.noreply.github.com
|
f7af16fa31198ad465290cab948bded706b51328
|
b716ab7cc296ba20f23bbc2aed5af6bef29923fc
|
/2.py
|
0f72894c2caf328cb32d74883ee59561b00848b4
|
[] |
no_license
|
widyamellysa/dumbways-test
|
efea011988b962240af3a5f13fa9fc029737e0e7
|
ec5a9d09afd12af35d8cd8073376f9727d6c3d25
|
refs/heads/master
| 2022-11-23T16:27:10.859000
| 2020-08-01T14:49:12
| 2020-08-01T14:49:12
| 284,274,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
# Discount calculator: 20000 off for totals from 50000 to 80000, 40000 off
# above 80000, no discount below 50000.
total_belanja = float(input("Total purchase : "))
bayar = float(input("Amount paid : "))
#DumbwaysJos
if total_belanja >= 50000 and total_belanja <= 80000:
    diskon1 = 20000
    hasil1 = total_belanja - diskon1
    sisa1 = bayar - hasil1
    print("Discount : ", diskon1)
    print("Change : ", sisa1)
elif total_belanja < 50000:
    diskon2 = 0
    hasil2 = total_belanja - diskon2
    sisa2 = bayar - hasil2
    print("Discount : ", diskon2)
    print("Change : ", sisa2)
#DumbwaysMantap
elif total_belanja > 80000:
    diskon3 = 40000
    hasil3 = total_belanja - diskon3
    sisa3 = bayar - hasil3
    print("Discount : ", diskon3)
    print("Change : ", sisa3)
|
[
"noreply@github.com"
] |
noreply@github.com
|
1e44102c3d0503284237207d931a11eff57d5169
|
11805f3f0ca426ae21b754526578fe6b69c9fd52
|
/emotion-analysis-3-master/read_excel/read_oneStu_allWeek.py
|
852b464b524bf620e6f7eedd64b3284516bced5e
|
[] |
no_license
|
chensheng1/NLP_emotion
|
106838156224cfc1ab9491df908d92bfdfac2915
|
f43760e7493aa4d360dd0c4d0bca8ba100614612
|
refs/heads/master
| 2023-03-22T15:54:16.107975
| 2021-03-08T15:00:46
| 2021-03-08T15:00:46
| 345,604,118
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
'''
Reads one student's pre-class, post-class and weekly-summary data
'''
# -*- coding: utf-8 -*-
import xlrd
xlsfile1 = r"课前预习.xls"  # open the pre-class preparation workbook
xlsfile2 = r"课后作业.xls"  # open the post-class homework workbook
xlsfile3 = r"每周总结.xls"  # open the weekly summary workbook
xlsfile4 = r"实验练习.xls"  # open the lab exercise workbook
book1 = xlrd.open_workbook(xlsfile1)  # get the Excel book object for each file
book2 = xlrd.open_workbook(xlsfile2)
book3 = xlrd.open_workbook(xlsfile3)
book4 = xlrd.open_workbook(xlsfile4)
sheet1 = book1.sheet_by_index(0)  # get each sheet object by its index
sheet2 = book2.sheet_by_index(0)
sheet3 = book3.sheet_by_index(0)
sheet4 = book4.sheet_by_index(0)
nrows1 = sheet1.nrows  # total number of rows
ncols1 = sheet1.ncols  # total number of columns
nrows2 = sheet2.nrows
ncols2 = sheet2.ncols
nrows3 = sheet3.nrows
ncols3 = sheet3.ncols
print("pre-class rows/cols:", nrows1, ncols1)
print("post-class rows/cols:", nrows2, ncols2)
print("weekly-summary rows/cols:", nrows3, ncols3)
f0 = open('./oneStudent/oneStu_allWeek.txt', 'w', encoding='utf-8')
# Select student `name`'s pre-class, post-class and weekly-summary comments for
# every week up to `week`; write "null" for any missing entry.
def choiceAllText(name, week):
    print("student name:", name)
    for k in range(1, week+1):
        flag1 = False
        flag2 = False
        flag3 = False
        # pre-class preparation
        for i in range(nrows1):
            a = sheet1.cell_value(i, 2)  # name
            b = sheet1.cell_value(i, 4)  # week number
            c = sheet1.cell_value(i, 6)  # comment text
            if a == name and b == k:
                flag1 = True
                print(k, "week pre-class:", c)
                f0.write(c)
                f0.write('\n')
                break
        if not flag1:
            print(k, "week pre-class: null")
            f0.write("null")
            f0.write('\n')
        # post-class homework
        for i in range(nrows2):
            a = sheet2.cell_value(i, 2)  # name
            b = sheet2.cell_value(i, 4)  # week number
            c = sheet2.cell_value(i, 6)  # comment text
            if a == name and b == k:
                flag2 = True
                print(k, "week post-class:", c)
                f0.write(c)
                f0.write('\n')
                break
        if not flag2:
            print(k, "week post-class: null")
            f0.write("null")
            f0.write('\n')
        # weekly summary
        for i in range(nrows3):
            a = sheet3.cell_value(i, 2)  # name
            b = sheet3.cell_value(i, 4)  # week number
            c = sheet3.cell_value(i, 5)  # comment text
            if a == name and b == k:
                flag3 = True
                print(k, "week summary:", c)
                f0.write(c)
                f0.write('\n')
                break
        if not flag3:
            print(k, "week summary: null")
            f0.write("null")
            f0.write('\n')
name1 = "文习尚"  # m1701
name2 = "林雨钦"  # m1702
name3 = "赵华源"  # m1703
name4 = "徐海标"  # m1704
name5 = "李宵"
name6 = "朱智"
name7 = "刘晓稳"
name8 = "朱浩杰"
week = 13
choiceAllText(name4, week)
f0.close()  # close the output file once all weeks have been written
|
[
"544740618@qq.com"
] |
544740618@qq.com
|
64778fcbc1e5fce8a3fa62050a7bf3561afa5db0
|
5822e8bfd09ec98770966763a6637dfaeb0b6dbd
|
/TWLight/emails/views.py
|
350215a5e68768e96691e19d9b15fa5c417b7043
|
[
"MIT"
] |
permissive
|
Jain-Aditya/TWLight
|
997c01cab83b9af5073589808011c3cc3ecc6d51
|
4c005fb9346e262cc452509b029774bde6cff0dc
|
refs/heads/master
| 2020-04-30T07:56:38.706751
| 2019-03-18T08:04:35
| 2019-03-18T08:04:35
| 176,700,371
| 0
| 0
| null | 2019-03-20T09:34:36
| 2019-03-20T09:34:36
| null |
UTF-8
|
Python
| false
| false
| 2,763
|
py
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name='post')
class ContactUsView(FormView):
template_name = 'emails/contact.html'
form_class = ContactUsForm
success_url = reverse_lazy('contact')
def get_initial(self):
initial = super(ContactUsView, self).get_initial()
# @TODO: This sort of gets repeated in ContactUsForm.
# We could probably be factored out to a common place for DRYness.
if self.request.user.is_authenticated():
if self.request.user.email:
initial.update({
'email': self.request.user.email,
})
if ('message' in self.request.GET):
initial.update({
'message': self.request.GET['message'],
})
initial.update({
'next': reverse_lazy('contact'),
})
return initial
def form_valid(self, form):
# Adding an extra check to ensure the user is a wikipedia editor.
try:
assert self.request.user.editor
email = form.cleaned_data['email']
message = form.cleaned_data['message']
carbon_copy = form.cleaned_data['cc']
ContactUs.new_email.send(
sender=self.__class__,
user_email=email,
cc=carbon_copy,
editor_wp_username=self.request.user.editor.wp_username,
body=message
)
messages.add_message(self.request, messages.SUCCESS,
# Translators: Shown to users when they successfully submit a new message using the contact us form.
_('Your message has been sent. We\'ll get back to you soon!'))
return HttpResponseRedirect(reverse('contact'))
        except (AssertionError, AttributeError):
            messages.add_message(self.request, messages.WARNING,
                # Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
                _('You must be a Wikipedia editor to do that.'))
            raise PermissionDenied
|
[
"uyscuti.wiki@gmail.com"
] |
uyscuti.wiki@gmail.com
|
3039c444c18f0b492a472774de7ddcf70fefc723
|
01dd174a3a7d26226564711e32711f137513663f
|
/pyscf/grad/uks.py
|
cb2d2d1dd98fc530fe5f2ab8a3bcd9d5ad9f1214
|
[
"Apache-2.0"
] |
permissive
|
cherishyli/pyscf
|
00cb09c873edc8890be8501414678cdfa54b177e
|
468a4bfc4ce067eb7dab6f9289d71122b219609e
|
refs/heads/master
| 2020-04-18T11:40:00.398066
| 2019-01-24T23:07:36
| 2019-01-24T23:07:36
| 167,508,739
| 1
| 0
|
Apache-2.0
| 2019-01-25T08:00:12
| 2019-01-25T08:00:12
| null |
UTF-8
|
Python
| false
| false
| 10,924
|
py
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Non-relativistic UKS analytical nuclear gradients'''
import time
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import rks as rks_grad
from pyscf.grad import uhf as uhf_grad
from pyscf.dft import numint, gen_grid
from pyscf import __config__
def get_veff(ks_grad, mol=None, dm=None):
'''Coulomb + XC functional
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (time.clock(), time.time())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
if mf.nlc != '':
raise NotImplementedError
#enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
if ks_grad.grid_response:
exc, vxc = get_vxc_full_response(ni, mol, grids, mf.xc, dm,
max_memory=max_memory,
verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = get_vxc(ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10:
vj = ks_grad.get_j(mol, dm)
vxc += vj[0] + vj[1]
else:
vj, vk = ks_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10: # For range separated Coulomb operator
with mol.with_range_coulomb(omega):
vk += ks_grad.get_k(mol, dm) * (alpha - hyb)
vxc += vj[0] + vj[1] - vk
return lib.tag_array(vxc, exc1_grid=exc)
def get_vxc(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[1]
vrho = vxc[0]
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vmat[0], mol, ao[1:4], aow, mask, ao_loc, True)
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vmat[1], mol, ao[1:4], aow, mask, ao_loc, True)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[1]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
rks_grad._gga_grad_sum_(vmat[0], mol, ao, wva, mask, ao_loc)
rks_grad._gga_grad_sum_(vmat[1], mol, ao, wvb, mask, ao_loc)
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
else:
raise NotImplementedError('meta-GGA')
exc = numpy.zeros((mol.natm,3))
# - sign because nabla_X = -nabla_x
return exc, -vmat
def get_vxc_full_response(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Full response including the response of the grids'''
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
aoslices = mol.aoslice_by_atom()
excsum = 0
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
ngrids = weight.size
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[:2]
vrho = vxc[0]
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[0] += vtmp
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a+rho_b, weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
ngrids = weight.size
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[:2]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wva, mask, ao_loc)
vmat[0] += vtmp
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a[0]+rho_b[0], weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wvb, mask, ao_loc)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
else:
raise NotImplementedError('meta-GGA')
# - sign because nabla_X = -nabla_x
return excsum, -vmat
class Gradients(uhf_grad.Gradients):
grid_response = getattr(__config__, 'grad_uks_Gradients_grid_response', False)
def __init__(self, mf):
uhf_grad.Gradients.__init__(self, mf)
self.grids = None
self.grid_response = False
self._keys = self._keys.union(['grid_response', 'grids'])
def dump_flags(self):
uhf_grad.Gradients.dump_flags(self)
logger.info(self, 'grid_response = %s', self.grid_response)
return self
get_veff = get_veff
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
if self.grid_response:
vhf = envs['vhf']
log = envs['log']
log.debug('grids response for atom %d %s',
atom_id, vhf.exc1_grid[atom_id])
return vhf.exc1_grid[atom_id]
else:
return 0
Grad = Gradients
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.charge = 1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-12
#mf.grids.atom_grid = (20,86)
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.12090786243525126)
#[[-5.23195019e-16 -5.70291415e-16 5.32918387e-02]
# [ 1.33417513e-16 6.75277008e-02 -2.66519852e-02]
# [ 1.72274651e-16 -6.75277008e-02 -2.66519852e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.12091122429043633)
#[[-2.95956939e-16 -4.22275612e-16 5.32998759e-02]
# [ 1.34532051e-16 6.75279140e-02 -2.66499379e-02]
# [ 1.68146089e-16 -6.75279140e-02 -2.66499379e-02]]
mf.xc = 'b88,p86'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.11509739136150157)
#[[ 2.58483362e-16 5.82369026e-16 5.17616036e-02]
# [-5.46977470e-17 6.39273304e-02 -2.58849008e-02]
# [ 5.58302713e-17 -6.39273304e-02 -2.58849008e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.11507986316077731)
mf.xc = 'b3lypg'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.10202554999695367)
#[[ 6.47874920e-16 -2.75292214e-16 3.97215970e-02]
# [-6.60278148e-17 5.87909340e-02 -1.98650384e-02]
# [ 6.75500259e-18 -5.87909340e-02 -1.98650384e-02]]
mol = gto.Mole()
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0. )], ]
mol.unit = 'B'
mol.basis = '631g'
mol.charge = -1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365160440876001)
# sum over z direction non-zero, due to meshgrid response
# H -0.0000000000 0.0000000000 -0.1481125370
# F -0.0000000000 0.0000000000 0.1481164667
mf = dft.UKS(mol)
mf.grids.prune = None
mf.grids.level = 6
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365040148752827)
# H 0.0000000000 0.0000000000 -0.1481124925
# F -0.0000000000 0.0000000000 0.1481122913
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
e259df553081c2a0843857a31971fbeb29ab02d1
|
8c9df3465ec7cab68b10e67823c1f9b475dab68e
|
/square__transverse_longitudinal_field_af_ising__static/square_ising.py
|
12dad1d1699c6934cd3da33fb9d3ea8f37bdd5f5
|
[
"BSD-3-Clause"
] |
permissive
|
deyh2020/quspin_example
|
f86cf3cea2b8c04efc017e9618cb935494e94f82
|
931ca2ea5e6bbe02ebdd6d6a22d90db24d6c760c
|
refs/heads/master
| 2023-02-07T21:27:12.913763
| 2020-12-30T08:00:57
| 2020-12-30T08:00:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
## http://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_general.html#quspin.basis.spin_basis_general
## https://doi.org/10.1103/PhysRevX.8.021069
## https://doi.org/10.1103/PhysRevX.8.021070
## consider nearest neighbor Ising
from __future__ import print_function, division
from quspin.operators import hamiltonian # operators
from quspin.basis import spin_basis_general # spin basis constructor
import numpy as np # general math functions
def exact_diag(J,Hx,Hz,Lx,Ly):
N_2d = Lx*Ly # number of sites
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N_2d) # sites [0,1,2,....]
x = s%Lx # x positions for sites
y = s//Lx # y positions for sites
T_x = (x+1)%Lx + Lx*y # translation along x-direction
T_y = x +Lx*((y+1)%Ly) # translation along y-direction
P_x = x + Lx*(Ly-y-1) # reflection about x-axis
P_y = (Lx-x-1) + Lx*y # reflection about y-axis
Z = -(s+1) # spin inversion
###### setting up bases ######
# basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0)
basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0,kxblock=(T_x,0),kyblock=(T_y,0))
###### setting up hamiltonian ######
# setting up site-coupling lists
Jzzs = [[J,i,T_x[i]] for i in range(N_2d)]+[[J,i,T_y[i]] for i in range(N_2d)]
Hxs = [[-Hx,i] for i in range(N_2d)]
Hzs = [[-Hz,i] for i in range(N_2d)]
static = [["zz",Jzzs],["x",Hxs],["z",Hzs]]
# build hamiltonian
# H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64)
no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks)
# diagonalise H
ene,vec = H.eigsh(time=0.0,which="SA",k=2)
# ene = H.eigsh(time=0.0,which="SA",k=2,return_eigenvectors=False); ene = np.sort(ene)
norm2 = np.linalg.norm(vec[:,0])**2
# calculate uniform magnetization
int_mx = [[1.0,i] for i in range(N_2d)]
int_mz = [[1.0,i] for i in range(N_2d)]
static_mx = [["x",int_mx]]
static_mz = [["z",int_mz]]
op_mx = hamiltonian(static_mx,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
op_mz = hamiltonian(static_mz,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
mx = (np.conjugate(vec[:,0]).dot(op_mx.dot(vec[:,0])) / norm2).real / N_2d
mz = (np.conjugate(vec[:,0]).dot(op_mz.dot(vec[:,0])) / norm2).real / N_2d
# calculate n.n. sz.sz correlation
int_mz0mz1 = [[1.0,i,T_x[i]] for i in range(N_2d)]+[[1.0,i,T_y[i]] for i in range(N_2d)]
static_mz0mz1 = [["zz",int_mz0mz1]]
op_mz0mz1 = hamiltonian(static_mz0mz1,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
mz0mz1 = (np.conjugate(vec[:,0]).dot(op_mz0mz1.dot(vec[:,0])) / norm2).real / N_2d
return ene, mx, mz, mz0mz1
def main():
###### define model parameters ######
Lx, Ly = 4, 4 # linear dimension of 2d lattice
N_2d = Lx*Ly # number of sites
J = 1.0 # AF Ising
# Hz = 2.00 # longitudinal field
Hzs = np.linspace(0.0,4.0,401)
# Hzs = np.linspace(1.99,2.03,41)
Hx = 0.10 # transverse field
for Hz in Hzs:
ene, mx, mz, mz0mz1 = exact_diag(J,Hx,Hz,Lx,Ly)
# print(J,Hz,Hx,Lx,Ly,ene[0]/N_2d,ene[1]/N_2d)
print(J,Hz,Hx,Lx,Ly,ene[0]/N_2d,mx,mz,mz0mz1)
if __name__ == "__main__":
main()
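
# A minimal usage sketch of exact_diag at a single field value (the numbers
# below are illustrative assumptions, not taken from the original sweep); it
# is wrapped in a function so nothing extra runs when this file executes.
def single_point_example():
    Lx, Ly = 4, 4
    ene, mx, mz, mz0mz1 = exact_diag(J=1.0, Hx=0.10, Hz=2.00, Lx=Lx, Ly=Ly)
    # ene holds the two lowest eigenvalues; report the energy density.
    print(ene[0]/(Lx*Ly), mx, mz, mz0mz1)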
|
[
"27846552+ryuikaneko@users.noreply.github.com"
] |
27846552+ryuikaneko@users.noreply.github.com
|
811e650b58eaf4337be5d070b3152062620dfaa4
|
1d1a21b37e1591c5b825299de338d18917715fec
|
/Mathematics/Data science/Mathmatics/02/inverse_matrix.py
|
5531c0cc7924c0fa9e1eb9313e95e425439086b8
|
[] |
no_license
|
brunoleej/study_git
|
46279c3521f090ebf63ee0e1852aa0b6bed11b01
|
0c5c9e490140144caf1149e2e1d9fe5f68cf6294
|
refs/heads/main
| 2023-08-19T01:07:42.236110
| 2021-08-29T16:20:59
| 2021-08-29T16:20:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
import numpy as np
A = np.array([[1,1,0],[0,1,1],[1,1,1]])
print(A)
'''
[[1 1 0]
[0 1 1]
[1 1 1]]
'''
# Compute the inverse matrix (inverse_matrix)
Ainv = np.linalg.inv(A)
print(Ainv)
'''
[[ 0. -1. 1.]
[ 1. 1. -1.]
[-1. 0. 1.]]
'''
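
# Verification sketch (added for illustration; output formatting may vary by
# NumPy version): multiplying A by its inverse should recover the identity
# matrix, up to floating-point rounding.
print(A @ Ainv)
'''
[[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]]
'''
print(np.allclose(A @ Ainv, np.eye(3)))
# True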
|
[
"jk04059@naver.com"
] |
jk04059@naver.com
|
d8e85972fade73cbb7841a166d847c90f11b5bd4
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/operations/_virtual_machine_extensions_operations.py
|
9e1af3df025f8ded444c980807a5abb4416ee1ed
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 44,661
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
"vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
"vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
"vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_request(
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
"vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
resource_group_name: str, vm_name: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class VirtualMachineExtensionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_04_01.ComputeManagementClient`'s
:attr:`virtual_machine_extensions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_or_update_initial(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtension, IO],
**kwargs: Any
) -> _models.VirtualMachineExtension:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(extension_parameters, (IO, bytes)):
_content = extension_parameters
else:
_json = self._serialize.body(extension_parameters, "VirtualMachineExtension")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: _models.VirtualMachineExtension,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
operation. Required.
:type extension_parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
operation. Required.
:type extension_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtension, IO],
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
        operation. Is either a model type or an IO type. Required.
:type extension_parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
def _update_initial(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
**kwargs: Any
) -> _models.VirtualMachineExtension:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(extension_parameters, (IO, bytes)):
_content = extension_parameters
else:
_json = self._serialize.body(extension_parameters, "VirtualMachineExtensionUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
@overload
def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: _models.VirtualMachineExtensionUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
operation. Required.
:type extension_parameters:
~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
operation. Required.
:type extension_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
**kwargs: Any
) -> LROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
        operation. Is either a model type or an IO type. Required.
:type extension_parameters:
~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
@distributed_trace
def begin_delete(
self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
) -> LROPoller[None]:
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be deleted.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
@distributed_trace
def get(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> _models.VirtualMachineExtension:
"""The operation to get the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtension or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
}
@distributed_trace
def list(
self, resource_group_name: str, vm_name: str, expand: Optional[str] = None, **kwargs: Any
) -> _models.VirtualMachineExtensionsListResult:
"""The operation to get all extensions of a Virtual Machine.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension. Required.
:type vm_name: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionsListResult or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionsListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
cls: ClsType[_models.VirtualMachineExtensionsListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineExtensionsListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions"
}
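
# A minimal usage sketch (not part of the generated module): these operations
# are reached through a ComputeManagementClient's `virtual_machine_extensions`
# attribute rather than instantiated directly. The resource names below are
# hypothetical placeholders.
def _example_delete_extension():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.virtual_machine_extensions.begin_delete(
        resource_group_name="example-rg",
        vm_name="example-vm",
        vm_extension_name="example-extension",
    )
    poller.result()  # block until the long-running delete completes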
|
[
"noreply@github.com"
] |
noreply@github.com
|
4e9df8b1a88f0a2c470ad97f303a25e010fa60f9
|
60a93f1c34617bd08f1862ef076dc77f766d37b2
|
/thirdParty/bullet3-2.88/docs/pybullet_quickstart_guide/WordpressPreview/BuildMarkdeepUtility.py
|
0738cdafd74b4278ae299b75c2dec90554cca285
|
[
"MIT",
"Zlib"
] |
permissive
|
dantros/MonaEngine
|
4f4fdbd2344f553f9bfd0189a4872a0581022f40
|
e3d0048c2fe2dd282b84686f0e31e5741714222b
|
refs/heads/master
| 2023-07-27T10:51:18.565020
| 2021-09-17T00:11:39
| 2021-09-17T00:11:39
| 395,851,979
| 0
| 1
|
MIT
| 2021-08-15T03:23:15
| 2021-08-14T01:32:20
| null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
import re
if(__name__=="__main__"):
# Assemble the script which embeds the Markdeep page into the preview blog
PreviewBlogPage=open("PreviewBlogPage.htm","rb").read().decode("utf-8");
HeadMatch=re.search("<head(.*?)>(.*?)</head>",PreviewBlogPage,re.DOTALL);
HeadAttributes=HeadMatch.group(1);
FullDocumentHead=HeadMatch.group(2);
BodyMatch=re.search("<body(.*?)>(.*?)</body>",PreviewBlogPage,re.DOTALL);
BodyAttributes=BodyMatch.group(1);
FullPreviewBody=BodyMatch.group(2);
ArticleHTMLCodeMacro="$(ARTICLE_HTML_CODE)";
iArticleHTMLCodeMacro=FullPreviewBody.find(ArticleHTMLCodeMacro);
DocumentBodyPrefix=FullPreviewBody[0:iArticleHTMLCodeMacro];
DocumentBodySuffix=FullPreviewBody[iArticleHTMLCodeMacro+len(ArticleHTMLCodeMacro):];
FullPrepareHTMLCode=open("PrepareHTML.js","rb").read().decode("utf-8");
ReplacementList=[
("$(FULL_DOCUMENT_HEAD)",FullDocumentHead),
("$(DOCUMENT_BODY_PREFIX)",DocumentBodyPrefix),
("$(DOCUMENT_BODY_SUFFIX)",DocumentBodySuffix)
];
for Macro,Replacement in ReplacementList:
FullPrepareHTMLCode=FullPrepareHTMLCode.replace(Macro,Replacement.replace("\r\n","\\r\\n\\\r\n").replace("'","\\'"));
# Generate code which sets body and head attributes appropriately
for Element,AttributeCode in [("head",HeadAttributes),("body",BodyAttributes)]:
FullPrepareHTMLCode+="\r\n// Setting "+Element+" attributes\r\n";
for Match in re.finditer("(\\w+)=\\\"(.*?)\\\"",AttributeCode):
FullPrepareHTMLCode+="document."+Element+".setAttribute(\""+Match.group(1)+"\",\""+Match.group(2)+"\");\r\n";
open("PrepareHTML.full.js","wb").write(FullPrepareHTMLCode.encode("utf-8"));
# Concatenate all the scripts together
SourceFileList=[
"PrepareHTML.full.js",
"SetMarkdeepMode.js",
"markdeep.min.js",
"DisplayMarkdeepOutput.js",
"InvokeMathJax.js"
];
OutputCode="\r\n\r\n".join(["// "+SourceFile+"\r\n\r\n"+open(SourceFile,"rb").read().decode("utf-8") for SourceFile in SourceFileList]);
OutputFile=open("MarkdeepUtility.js","wb");
OutputFile.write(OutputCode.encode("utf-8"));
OutputFile.close();
print("Done.");
|
[
"byronaaronb@gmail.com"
] |
byronaaronb@gmail.com
|
3724941a22eb118782c4c142d7dc6097e8d37e35
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-fractionDigits-1-3.py
|
32add0c922d5342c7b50eaabb85bc7ee39adc0d0
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_fraction_digits_1_xsd.nistschema_sv_iv_atomic_integer_fraction_digits_1 import NistschemaSvIvAtomicIntegerFractionDigits1
obj = NistschemaSvIvAtomicIntegerFractionDigits1(
value=825606520242485152
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
e23c70a6f0bf97c57a6a3211e8ce4ee4c23a4b01
|
22d3d698edfa66d071a77b98d9d293087e699d87
|
/casanova/cli.py
|
1ac34d646aac0c3f4e2ac82fc186be538414654c
|
[
"MIT"
] |
permissive
|
medialab/casanova
|
8b880b1848f8f1ea785fdba483395a7d7085b87f
|
fcd449df0fba33a48693bea4919c81e1654a6866
|
refs/heads/master
| 2023-07-30T04:16:33.079309
| 2023-07-13T12:41:43
| 2023-07-13T12:41:43
| 254,628,154
| 13
| 2
|
MIT
| 2023-01-17T16:00:31
| 2020-04-10T12:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 14,540
|
py
|
from typing import Optional, List
import re
import sys
import gzip
import json
import math
import random
import statistics
from itertools import islice
from types import GeneratorType
from os.path import join
from urllib.parse import urlsplit, urljoin
from multiprocessing import Pool as MultiProcessPool
from dataclasses import dataclass
from collections import Counter, defaultdict, deque, OrderedDict
from collections.abc import Mapping, Iterable
from casanova import (
Reader,
Enricher,
CSVSerializer,
RowWrapper,
Headers,
Writer,
InferringWriter,
)
from casanova.utils import import_target, flatmap
@dataclass
class InitializerOptions:
code: str
module: bool
row_len: int
args: List[str]
init_codes: List[str]
before_codes: List[str]
after_codes: List[str]
fieldnames: Optional[List[str]] = None
selected_indices: Optional[List[int]] = None
base_dir: Optional[str] = None
# NOTE: just a thin wrapper to make sure we catch KeyboardInterrupt in
# child processes gracefully.
class WorkerWrapper(object):
__slots__ = ("fn",)
def __init__(self, fn):
self.fn = fn
def __call__(self, *args, **kwargs):
try:
return self.fn(*args, **kwargs)
except KeyboardInterrupt:
sys.exit(1)
class SingleProcessPool(object):
def imap(self, worker, tasks, chunksize=1):
for t in tasks:
yield worker(t)
def imap_unordered(self, *args, **kwargs):
yield from self.imap(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
return
def get_pool(n: int, options: InitializerOptions):
initargs = (options,)
if n < 2:
multiprocessed_initializer(*initargs)
return SingleProcessPool()
return MultiProcessPool(
n, initializer=multiprocessed_initializer, initargs=initargs
)
def get_csv_serializer(cli_args):
return CSVSerializer(
plural_separator=cli_args.plural_separator,
none_value=cli_args.none_value,
true_value=cli_args.true_value,
false_value=cli_args.false_value,
)
def get_inferring_writer(output_file, cli_args):
return InferringWriter(
output_file,
fieldnames=cli_args.fieldnames,
plural_separator=cli_args.plural_separator,
none_value=cli_args.none_value,
true_value=cli_args.true_value,
false_value=cli_args.false_value,
)
# Global multiprocessing variables
CODE = None
FUNCTION = None
ARGS = None
SELECTION = None
BEFORE_CODES = []
AFTER_CODES = []
EVALUATION_CONTEXT = {}
ROW = None
BASE_DIR = None
def read(path, encoding: str = "utf-8") -> Optional[str]:
global BASE_DIR
if BASE_DIR is not None:
path = join(BASE_DIR, path)
if path.endswith(".gz"):
try:
with gzip.open(path, encoding=encoding, mode="rt") as f:
return f.read()
except FileNotFoundError:
return None
try:
        with open(path, encoding=encoding, mode="r") as f:
return f.read()
except FileNotFoundError:
return None
EVALUATION_CONTEXT_LIB = {
# lib
"join": join,
"math": math,
"mean": statistics.mean,
"median": statistics.median,
"random": random,
"re": re,
"read": read,
"urljoin": urljoin,
"urlsplit": urlsplit,
# classes
"Counter": Counter,
"defaultdict": defaultdict,
"deque": deque,
}
def initialize_evaluation_context():
global EVALUATION_CONTEXT
EVALUATION_CONTEXT = {
**EVALUATION_CONTEXT_LIB,
# state
"fieldnames": None,
"headers": None,
"index": 0,
"row": None,
"cell": None,
"cells": None,
}
def multiprocessed_initializer(options: InitializerOptions):
global CODE
global FUNCTION
global ARGS
global BEFORE_CODES
global AFTER_CODES
global ROW
global SELECTION
global BASE_DIR
# Reset in case of multiple execution from same process
CODE = None
FUNCTION = None
ARGS = None
SELECTION = None
BEFORE_CODES = []
AFTER_CODES = []
ROW = None
BASE_DIR = options.base_dir
initialize_evaluation_context()
if options.module:
FUNCTION = import_target(options.code)
ARGS = options.args
else:
CODE = options.code
BEFORE_CODES = options.before_codes
AFTER_CODES = options.after_codes
if options.selected_indices is not None:
SELECTION = options.selected_indices
if options.fieldnames is not None:
EVALUATION_CONTEXT["fieldnames"] = options.fieldnames
EVALUATION_CONTEXT["headers"] = Headers(options.fieldnames)
headers = EVALUATION_CONTEXT["headers"]
else:
headers = Headers(range(options.row_len))
for init_code in options.init_codes:
exec(init_code, None, EVALUATION_CONTEXT)
EVALUATION_CONTEXT["row"] = RowWrapper(headers, None)
ROW = EVALUATION_CONTEXT["row"]
def select(row):
if SELECTION is None:
return
cells = [row[i] for i in SELECTION]
EVALUATION_CONTEXT["cells"] = cells
EVALUATION_CONTEXT["cell"] = cells[0]
def multiprocessed_worker_using_eval(payload):
global EVALUATION_CONTEXT
i, row = payload
EVALUATION_CONTEXT["index"] = i
ROW._replace(row)
select(row)
try:
for before_code in BEFORE_CODES:
exec(before_code, EVALUATION_CONTEXT, None)
value = eval(CODE, EVALUATION_CONTEXT, None)
for after_code in AFTER_CODES:
exec(after_code, EVALUATION_CONTEXT, None)
return None, i, value
except Exception as e:
return e, i, None
def collect_args(i, row):
for arg_name in ARGS:
if arg_name == "row":
yield ROW
elif arg_name == "index":
yield i
elif arg_name == "fieldnames":
yield EVALUATION_CONTEXT["fieldnames"]
elif arg_name == "headers":
yield EVALUATION_CONTEXT["headers"]
elif arg_name == "cell":
# NOTE: we know SELECTION is relevant because it's validated by CLI
yield row[SELECTION[0]]
elif arg_name == "cells":
# NOTE: we know SELECTION is relevant because it's validated by CLI
for idx in SELECTION:
yield row[idx]
else:
raise TypeError("unknown arg_name: %s" % arg_name)
def multiprocessed_worker_using_function(payload):
i, row = payload
ROW._replace(row)
args = tuple(collect_args(i, row))
try:
value = FUNCTION(*args)
# NOTE: consuming generators
if isinstance(value, GeneratorType):
value = list(value)
return None, i, value
except Exception as e:
return e, i, None
# TODO: go to minet for progress bar and rich?
# TODO: write proper cli documentation
def mp_iteration(cli_args, reader: Reader):
worker = (
multiprocessed_worker_using_eval
if not cli_args.module
else multiprocessed_worker_using_function
)
if cli_args.processes > 1:
worker = WorkerWrapper(worker)
selected_indices = None
if cli_args.select:
if reader.headers is not None:
selected_indices = reader.headers.select(cli_args.select)
else:
selected_indices = Headers.select_no_headers(cli_args.select)
init_options = InitializerOptions(
code=cli_args.code,
module=cli_args.module,
args=cli_args.args,
init_codes=cli_args.init,
before_codes=cli_args.before,
after_codes=cli_args.after,
row_len=reader.row_len,
fieldnames=reader.fieldnames,
selected_indices=selected_indices,
base_dir=cli_args.base_dir,
)
with get_pool(cli_args.processes, init_options) as pool:
# NOTE: we keep track of rows being worked on from the main process
# to avoid serializing them back with worker result.
worked_rows = {}
def payloads():
for t in reader.enumerate():
worked_rows[t[0]] = t[1]
yield t
mapper = pool.imap if not cli_args.unordered else pool.imap_unordered
for exc, i, result in mapper(worker, payloads(), chunksize=cli_args.chunk_size):
row = worked_rows.pop(i)
if exc is not None:
if cli_args.ignore_errors:
result = None
else:
raise exc
yield i, row, result
def map_action(cli_args, output_file):
serialize = get_csv_serializer(cli_args)
with Enricher(
cli_args.file,
output_file,
add=[cli_args.new_column],
delimiter=cli_args.delimiter,
) as enricher:
for _, row, result in mp_iteration(cli_args, enricher):
enricher.writerow(row, [serialize(result)])
def flatmap_action(cli_args, output_file):
serialize = get_csv_serializer(cli_args)
with Enricher(
cli_args.file,
output_file,
add=[cli_args.new_column],
delimiter=cli_args.delimiter,
) as enricher:
for _, row, result in mp_iteration(cli_args, enricher):
for value in flatmap(result):
enricher.writerow(row, [serialize(value)])
def filter_action(cli_args, output_file):
with Enricher(cli_args.file, output_file, delimiter=cli_args.delimiter) as enricher:
for _, row, result in mp_iteration(cli_args, enricher):
if cli_args.invert_match:
result = not result
if result:
enricher.writerow(row)
def map_reduce_action(cli_args, output_file):
acc_fn = None
if cli_args.module:
acc_fn = import_target(cli_args.accumulator)
with Reader(
cli_args.file,
delimiter=cli_args.delimiter,
) as enricher:
acc_context = EVALUATION_CONTEXT_LIB.copy()
acc = None
initialized = False
if cli_args.init_value is not None:
initialized = True
acc = eval(cli_args.init_value, acc_context, None)
acc_context["acc"] = acc
for _, row, result in mp_iteration(cli_args, enricher):
if not initialized:
acc_context["acc"] = result
initialized = True
continue
if acc_fn is None:
acc_context["current"] = result
acc_context["acc"] = eval(cli_args.accumulator, acc_context, None)
else:
acc_context["acc"] = acc_fn(acc_context["acc"], result)
final_result = acc_context["acc"]
if cli_args.json:
json.dump(
final_result,
output_file,
indent=2 if cli_args.pretty else None,
ensure_ascii=False,
)
print(file=output_file)
elif cli_args.csv:
writer = get_inferring_writer(output_file, cli_args)
writer.writerow(final_result)
else:
print(final_result, file=output_file)
class GroupWrapper:
__slots__ = ("__name", "__rows", "__wrapper")
def __init__(self, fieldnames):
self.__wrapper = RowWrapper(Headers(fieldnames), range(len(fieldnames)))
def _replace(self, name, rows):
self.__name = name
self.__rows = rows
@property
def name(self):
return self.__name
def __len__(self):
return len(self.__rows)
def __iter__(self):
for row in self.__rows:
self.__wrapper._replace(row)
yield self.__wrapper
def groupby_action(cli_args, output_file):
agg_fn = None
if cli_args.module:
agg_fn = import_target(cli_args.aggregator)
with Reader(
cli_args.file,
delimiter=cli_args.delimiter,
) as enricher:
# NOTE: using an ordered dict to guarantee stability for all python versions
groups = OrderedDict()
# Grouping
for _, row, result in mp_iteration(cli_args, enricher):
l = groups.get(result)
if l is None:
l = [row]
groups[result] = l
else:
l.append(row)
# Aggregating
agg_context = EVALUATION_CONTEXT_LIB.copy()
header_emitted = False
writer = Writer(output_file)
fieldnames = ["group"]
mapping_fieldnames = None
serializer = get_csv_serializer(cli_args)
if cli_args.fieldnames is not None:
mapping_fieldnames = cli_args.fieldnames
fieldnames += cli_args.fieldnames
header_emitted = True
writer.writerow(fieldnames)
group_wrapper = GroupWrapper(enricher.fieldnames)
for name, rows in groups.items():
group_wrapper._replace(name, rows)
if agg_fn is not None:
result = agg_fn(group_wrapper)
else:
agg_context["group"] = group_wrapper
result = eval(cli_args.aggregator, agg_context, None)
name = serializer(name)
if isinstance(result, Mapping):
if not header_emitted:
mapping_fieldnames = list(result.keys())
fieldnames += mapping_fieldnames
writer.writerow(fieldnames)
header_emitted = True
writer.writerow(
[name] + serializer.serialize_dict_row(result, mapping_fieldnames)
)
elif isinstance(result, Iterable) and not isinstance(result, (bytes, str)):
if not header_emitted:
fieldnames += ["col%i" % i for i in range(1, len(result) + 1)]
writer.writerow(fieldnames)
header_emitted = True
writer.writerow([name] + serializer.serialize_row(result))
else:
if not header_emitted:
writer.writerow(fieldnames + ["value"])
header_emitted = True
writer.writerow([name, serializer(result)])
def reverse_action(cli_args, output_file):
with Enricher(
cli_args.file, output_file, delimiter=cli_args.delimiter, reverse=True
) as enricher:
it = enricher
if cli_args.lines is not None:
it = islice(enricher, cli_args.lines)
for row in it:
enricher.writerow(row)
|
[
"guillaumeplique@gmail.com"
] |
guillaumeplique@gmail.com
|
17b48235e0f05f20c63ef835371d9913b5d0a9e1
|
2b7f5c15b0d6b90ffd85cb90f9e3d7f9b7a03a9b
|
/counting_bunnies.py
|
e9f365726f85f047068181854ad76a87de179b56
|
[] |
no_license
|
dpatel698/Google-Foobar
|
67fa5943eed33e1aa81ead11d588069647446930
|
434b2232b292b0b2e3d63ba46b948628e15e3261
|
refs/heads/master
| 2020-12-22T12:34:24.274893
| 2020-01-28T16:42:32
| 2020-01-28T16:42:32
| 236,782,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
def answer(x, y):
# your code here
vertical_sum = 1 + sum(range(y))
if x > 1:
horizontal_sum = sum(range(y + 1, (y + x)))
else:
horizontal_sum = 0
print(vertical_sum)
print(horizontal_sum)
return str(vertical_sum + horizontal_sum)
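# Added sanity check (illustration): the computed ID agrees with the closed
# form (x + y - 2) * (x + y - 1) // 2 + x for this diagonal numbering, e.g.
# answer(3, 2) -> vertical 2 + horizontal 7 = "9", and (3+2-2)*(3+2-1)//2 + 3 == 9.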
|
[
"dpatel0698@gmail.com"
] |
dpatel0698@gmail.com
|
ee714b917523630f20db920691fa7adf93352b79
|
b373081e3dd8ddc7520ca57841a0c17d2682ad74
|
/Source/systemrl/environments/cartpole.py
|
7dc0691b9031afdb006b8d35bac9120361147e0a
|
[
"MIT"
] |
permissive
|
aarsheem/696-ds
|
f8e680cf2d2acef6dd04aa14f406d31cce946dfe
|
2d74b1e3f430e369202982d7ad8c56f362b00f76
|
refs/heads/master
| 2020-12-26T22:14:06.200655
| 2020-07-08T22:08:04
| 2020-07-08T22:08:04
| 237,663,363
| 2
| 2
|
MIT
| 2020-04-24T15:16:49
| 2020-02-01T19:05:05
|
Python
|
UTF-8
|
Python
| false
| false
| 5,216
|
py
|
import numpy as np
from typing import Tuple
from .skeleton import Environment
class Cartpole(Environment):
"""
The cart-pole environment as described in the 687 course material. This
domain is modeled as a pole balancing on a cart. The agent must learn to
move the cart forwards and backwards to keep the pole from falling.
Actions: left (0) and right (1)
Reward: 1 always
Environment Dynamics: See the work of Florian 2007
(Correct equations for the dynamics of the cart-pole system) for the
observation of the correct dynamics.
"""
def __init__(self):
self._name = "Cartpole"
# TODO: properly define the variables below
self._action = None
self._reward = 0
self._isEnd = False
self._gamma = 1.0
# define the state # NOTE: you must use these variable names
self._x = 0. # horizontal position of cart
self._v = 0. # horizontal velocity of the cart
self._theta = 0. # angle of the pole
self._dtheta = 0. # angular velocity of the pole
# dynamics
self._g = 9.8 # gravitational acceleration (m/s^2)
self._mp = 0.1 # pole mass
self._mc = 1.0 # cart mass
self._l = 0.5 # (1/2) * pole length
self._dt = 0.02 # timestep
self._t = 0.0 # total time elapsed NOTE: you must use this variable
self.xMin = -2.4
self.xMax = 2.4
self.vMin = -10
self.vMax = 10
self.thetaMin = -np.pi / 12.0
self.thetaMax = np.pi / 12.0
self.omegaMin = -np.pi
self.omegaMax = np.pi
@property
def name(self)->str:
return self._name
@property
def reward(self) -> float:
return self._reward
@property
def gamma(self) -> float:
return self._gamma
@property
def action(self) -> int:
return self._action
@property
def isEnd(self) -> bool:
return self._isEnd
@property
def state(self) -> np.ndarray:
return np.array((self._x, self._v, self._theta, self._dtheta))
def nextState(self, state: np.ndarray, action: int) -> np.ndarray:
"""
Compute the next state of the pendulum using the euler approximation to the dynamics
"""
dstate = np.zeros(4)
dstate[0] = state[1]
dstate[2] = state[3]
F = action * 20.0 - 10.0
cos_multiplier = (-F - self._mp * self._l * (state[3]**2) * np.sin(state[2])) / (self._mp + self._mc)
denominator = self._l * (4.0/3.0 - (self._mp * (np.cos(state[2])**2))/(self._mp + self._mc))
dstate[3] = (self._g * np.sin(state[2]) + np.cos(state[2]) * cos_multiplier) / denominator
ml_multipier = (dstate[2]**2) * np.sin(state[2]) - dstate[3] * np.cos(state[2])
dstate[1] = (F + self._mp * self._l * ml_multipier) / (self._mp + self._mc)
return state + dstate * self._dt
def R(self, state: np.ndarray, action: int, nextState: np.ndarray) -> float:
#note the new reward
#at 15 degrees reward will be -1
return np.cos(12 * state[2])
def step(self, action: int) -> Tuple[np.ndarray, float, bool]:
"""
takes one step in the environment and returns the next state, reward, and if it is in the terminal state
"""
next_state = self.nextState(self.state, action)
self._reward = self.R(self.state, action, next_state)
self._action = action
self._x = next_state[0]
self._v = next_state[1]
self._theta = next_state[2]
self._dtheta = next_state[3]
self._t += self._dt
self._isEnd = self.terminal()
return next_state, self._reward, self._isEnd
def reset(self) -> None:
"""
resets the state of the environment to the initial configuration
"""
self._isEnd = False
self._x = 0. # horizontal position of cart
self._v = 0. # horizontal velocity of the cart
self._theta = 0. # angle of the pole
self._dtheta = 0. # angular velocity of the pole
self._t = 0
self._action = None
def normState(self):
"""
Normalize state values in range 0 -- 1
"""
x = (self._x - self.xMin)/(self.xMax - self.xMin)
v = (self._v - self.vMin)/(self.vMax - self.vMin)
#to spread out the distribution
x = (x - 0.5) * 10 + 0.5
v = (v - 0.5) * 5 + 0.5
theta = (self._theta - self.thetaMin)/(self.thetaMax - self.thetaMin)
dtheta = (self._dtheta - self.omegaMin)/(self.omegaMax - self.omegaMin)
return np.array([x,v,theta,dtheta])
def terminal(self) -> bool:
"""
The episode is at an end if:
time is greater that 20 seconds
pole falls |theta| > (pi/12.0)
cart hits the sides |x| >= 3
"""
if self._t > 20:
return True
if np.abs(self._theta) > np.pi/12.0:
return True
if np.abs(self._x) >= 3.0:
return True
return False
def numActions(self):
return 2
def numFeatures(self):
return 4
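# Minimal usage sketch (added illustration; assumes only this module's API):
#
#   env = Cartpole()
#   env.reset()
#   state, reward, done = env.step(1)   # push the cart to the right
#   features = env.normState()          # state scaled roughly into [0, 1]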
|
[
"aarsheemishra@1x-nat-vl931-172-30-152-76.wireless.umass.edu"
] |
aarsheemishra@1x-nat-vl931-172-30-152-76.wireless.umass.edu
|
598aa5789fc89d20614a949df27117f073692147
|
b2c780661aec8076a0b6d00bf8ea0d443a117df6
|
/Popularity/DCAFPilot/test/utils_t.py
|
b5af29934995578af40c4def334385a5c2d302eb
|
[] |
no_license
|
maitdaoud/DMWMAnalytics
|
894fa2afb8d83a5275f0abd61b74f4f839150cb0
|
fec7ef3e5240973db96ba53179940950002adbd8
|
refs/heads/master
| 2020-04-11T03:33:43.164136
| 2017-04-01T14:07:42
| 2017-04-01T14:07:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
#!/usr/bin/env python
#pylint: disable-msg=C0301,C0103
"""
Unit test for StorageManager class
"""
import os
import re
import time
import unittest
from pymongo import MongoClient
from DCAF.utils.utils import popdb_date, ndays
class testStorageManager(unittest.TestCase):
"""
A test class for the StorageManager class
"""
def setUp(self):
"set up connection"
pass
def tearDown(self):
"Perform clean-up"
pass
def test_popdb_date(self):
"Test popdb_date method"
result = popdb_date('20140105')
expect = '2014-1-5'
self.assertEqual(expect, result)
result = popdb_date(expect)
self.assertEqual(expect, result)
def test_ndays(self):
"Test ndays function"
time1, time2 = '20141120', '20141124'
result = ndays(time1, time2)
expect = 4
self.assertEqual(expect, result)
#
# main
#
if __name__ == '__main__':
unittest.main()
|
[
"vkuznet@gmail.com"
] |
vkuznet@gmail.com
|
f327af434bdb44b8db26624273fa576fedb584a9
|
371fe9a1fdeb62ad1142b34d732bde06f3ce21a0
|
/scripts/compute_path_pair_distances.py
|
32499ed5d2cd2871d18a77acc24343b70b16f798
|
[] |
no_license
|
maickrau/rdna_resolution
|
971f3b7e803565c9432be69b8e2a2852f55b8b79
|
aab42310c31e655cbbc318331082fa3436d69075
|
refs/heads/master
| 2023-03-03T05:14:33.966930
| 2021-02-17T20:45:20
| 2021-02-17T20:45:20
| 339,851,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,426
|
py
|
#!/usr/bin/python
import sys
graphfile = sys.argv[1]
max_diff = int(sys.argv[2])
modulo = int(sys.argv[3])
moduloindex = int(sys.argv[4])
# name \t path from stdin
def revcomp(s):
comp = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
return "".join(comp[c] for c in s[::-1])
def pathseq(p):
global nodeseqs
seq_no_hpc = "".join(nodeseqs[n[1:]] if n[0] == '>' else revcomp(nodeseqs[n[1:]]) for n in p)
# seq_hpc = seq_no_hpc[0]
# for i in range(1, len(seq_no_hpc)):
# if seq_no_hpc[i] != seq_no_hpc[i-1]: seq_hpc += seq_no_hpc[i]
# return seq_hpc
return seq_no_hpc
def edit_distance_simple(p1, p2):
global max_diff
if len(p1) - len(p2) <= -max_diff or len(p1) - len(p2) >= max_diff: return None
last_row = []
for i in range(0, len(p2)+1):
last_row.append(i)
for i in range(1, len(p1)):
next_row = [i]
min_this_row = i
for j in range(0, len(p2)):
index = len(next_row)
next_row.append(min(next_row[index-1]+1, last_row[index]+1))
if p1[i] == p2[j]:
next_row[index] = min(next_row[index], last_row[index-1])
else:
next_row[index] = min(next_row[index], last_row[index-1]+1)
min_this_row = min(min_this_row, next_row[index])
last_row = next_row
# if min_this_row >= max_diff: return None
return last_row[-1]
def edit_distance_wfa(p1, p2):
global max_diff
# use wfa because new and fancy
# https://academic.oup.com/bioinformatics/advance-article/doi/10.1093/bioinformatics/btaa777/5904262?rss=1
if len(p1) - len(p2) < -max_diff or len(p1) - len(p2) > max_diff: return None
start_match = -1
while start_match+1 < len(p1) and start_match+1 < len(p2) and p1[start_match+1] == p2[start_match+1]:
start_match += 1
	if start_match + 1 == len(p1) and start_match + 1 == len(p2): return 0 # the two strings are identical
last_column = [start_match]
# sys.stderr.write("0" + "\n")
for i in range(1, max_diff):
offset = i-1
# sys.stderr.write(str(i) + "\n")
next_column = []
last_match =last_column[-i+offset+1]
while last_match+1-i < len(p1) and last_match+1 < len(p2) and p1[last_match+1-i] == p2[last_match+1]:
last_match += 1
if last_match+1-i >= len(p1) and last_match+1 >= len(p2):
return i
next_column.append(last_match)
for j in range(-i+1, +i):
last_match = last_column[j+offset]+1
if j > -i+1:
last_match = max(last_match, last_column[j+offset-1]-1)
if j < i-1:
last_match = max(last_match, last_column[j+offset+1])
while last_match+1+j < len(p1) and last_match+1 < len(p2) and p1[last_match+1+j] == p2[last_match+1]:
last_match += 1
if last_match+1+j >= len(p1) and last_match+1 >= len(p2):
return i
next_column.append(last_match)
last_match = last_column[i+offset-1]-1
while last_match+1+i < len(p1) and last_match+1 < len(p2) and p1[last_match+1+i] == p2[last_match+1]:
last_match += 1
if last_match+1+i >= len(p1) and last_match+1 >= len(p2):
return i
next_column.append(last_match)
last_column = next_column
return None
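# Worked example (added note; assumes the wavefront bookkeeping above is
# correct and max_diff is set above 3): edit_distance_wfa("kitten", "sitting")
# should return the Levenshtein distance 3 (substitute k->s, e->i, insert g).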
def edit_distance(p1, p2):
global max_diff
# use wfa because new and fancy
# https://academic.oup.com/bioinformatics/advance-article/doi/10.1093/bioinformatics/btaa777/5904262?rss=1
if len(p1) - len(p2) < -max_diff or len(p1) - len(p2) > max_diff: return None
start_match = -1
while start_match+1 < len(p1) and start_match+1 < len(p2) and p1[start_match+1] == p2[start_match+1]:
start_match += 1
	if start_match + 1 == len(p1) and start_match + 1 == len(p2): return 0 # the two strings are identical
last_column = {0: start_match}
for i in range(1, max_diff):
offset = i-1
next_column = {}
for column in last_column:
if column not in next_column: next_column[column] = 0
next_column[column] = max(next_column[column], last_column[column]+1)
if column+1 not in next_column: next_column[column+1] = 0
next_column[column+1] = max(next_column[column+1], last_column[column])
if column-1 not in next_column: next_column[column-1] = 0
next_column[column-1] = max(next_column[column-1], last_column[column]-1)
p1_pos = last_column[column]
p2_pos = last_column[column] + column
if p1_pos >= 4 and p2_pos >= 4:
if p1[p1_pos-4:p1_pos] == p2[p2_pos-4:p2_pos] and p1[p1_pos-4:p1_pos-2] == p1[p1_pos-2:p1_pos]:
if p1_pos+2 <= len(p1) and p1[p1_pos:p1_pos+2] == p1[p1_pos-2:p1_pos]:
extend_until = 0
while True:
if column-extend_until not in next_column: next_column[column-extend_until] = 0
next_column[column-extend_until] = max(next_column[column-extend_until], last_column[column]+extend_until)
if p1_pos+extend_until+2 <= len(p1) and p1[p1_pos+extend_until:p1_pos+extend_until+2] == p1[p1_pos-2:p1_pos]:
extend_until += 2
else:
break
if p2_pos+2 <= len(p2) and p2[p2_pos:p2_pos+2] == p2[p2_pos-2:p2_pos]:
extend_until = 0
while True:
if column+extend_until+2 not in next_column: next_column[column+extend_until+2] = 0
next_column[column+extend_until+2] = max(next_column[column+extend_until+2], last_column[column])
if p2_pos+extend_until+2 <= len(p2) and p2[p2_pos+extend_until:p2_pos+extend_until+2] == p2[p2_pos-2:p2_pos]:
extend_until += 2
else:
break
for column in next_column:
p1_pos = next_column[column]
p2_pos = next_column[column] + column
while p1_pos+1 < len(p1) and p2_pos+1 < len(p2) and p1[p1_pos+1] == p2[p2_pos+1]:
next_column[column] += 1
p1_pos += 1
p2_pos += 1
if p1_pos+1 >= len(p1) and p2_pos+1 >= len(p2): return i
last_column = next_column
return None
nodeseqs = {}
with open(graphfile) as f:
for l in f:
parts = l.strip().split('\t')
if parts[0] == 'S':
nodeseqs[parts[1]] = parts[2]
num = 0
pathnum = {}
paths = {}
for l in sys.stdin:
parts = l.strip().split('\t')
name = parts[0]
last_break = 0
path = []
pathstr = parts[1] + '>'
for i in range(1, len(pathstr)):
if pathstr[i] == '<' or pathstr[i] == '>':
path.append(pathstr[last_break:i])
last_break = i
if name in paths: print(name)
assert name not in paths
paths[name] = pathseq(path)
pathnum[name] = num
num += 1
# print(name + "\t" + paths[name])
for path1 in paths:
if pathnum[path1] % modulo != moduloindex: continue
for path2 in paths:
if path1 <= path2: continue
value = max_diff + 1
edit_dist = edit_distance(paths[path1], paths[path2])
# edit_dist = edit_distance_simple(paths[path1], paths[path2])
if edit_dist is None: continue
if edit_dist is not None: value = edit_dist
print(path1 + "\t" + path2 + "\t" + str(value))
|
[
"m_rautiainen@hotmail.com"
] |
m_rautiainen@hotmail.com
|
cca9f2e5ed6c7cd9fe744913449f05e61d1ed854
|
8a47ab47a101d4b44dd056c92a1763d5fac94f75
|
/力扣/简单练习/300-最长上升子序列.py
|
edecfbee733ea3c1f051716235583aa67c1a5524
|
[] |
no_license
|
Clint-cc/Leecode
|
d5528aa7550a13a5bcf2f3913be2d5db2b5299f3
|
8befe73ab3eca636944800e0be27c179c45e1dbf
|
refs/heads/master
| 2020-09-14T07:35:41.382377
| 2020-07-01T01:27:18
| 2020-07-01T01:27:18
| 223,066,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
# !D:/Code/python
# -*- coding:utf-8 -*-
# @Author : Clint
# @Question : Given an unsorted integer array, find the length of the longest increasing subsequence.
def lengthOfLIS(nums):
    '''
    Approach: iterate over the array; when the next element is greater than or
    equal to the current one, increment count; when it is not, compare count
    with max_count and reset count; output max_count at the end.
    Pitfall: for input [10,9,2,5,3,7,101,18] the expected output is 4, since
    the longest increasing subsequence is [2,3,7,101], whose length is 4.
    :param nums:
    :return:
    '''
count = 1
max_count = 1
for i in range(len(nums) - 1):
if nums[i + 1] >= nums[i]:
count += 1
else:
if count > max_count:
max_count = count
count = 1
else:
count = 1
if max_count < count:
max_count = count
return max_count
# Dynamic programming
def lengthOfLIS(nums):
if not nums:
return 0
dp = [1] * len(nums)
for i in range(len(nums)):
for j in range(i):
            if nums[j] < nums[i]:  # for a non-strictly increasing subsequence, change '<' on this line to '<='
dp[i] = max(dp[i], dp[j] + 1)
return max(dp)
# Binary search
def lengthOfLIS(nums):
d = []
for n in nums:
if not d or n > d[-1]:
d.append(n)
else:
l, r = 0, len(d) - 1
loc = r
while l <= r:
mid = (l + r) // 2
if d[mid] >= n:
loc = mid
r = mid - 1
else:
l = mid + 1
d[loc] = n
return len(d)
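# Worked trace (added illustration): for [10,9,2,5,3,7,101,18] the tails array
# d evolves as [10] -> [9] -> [2] -> [2,5] -> [2,3] -> [2,3,7] -> [2,3,7,101]
# -> [2,3,7,18], so len(d) == 4, the LIS length.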
print(lengthOfLIS([1, 2, 5, 3, 7, 11, 18]))
|
[
"clint1801@163.com"
] |
clint1801@163.com
|
ad790c2993b23a15711a5f19aa89999275d6fc8c
|
32986e2c8aa585699f810534444c9a3ff1e4f269
|
/task4_testing.py
|
64f2871585898cfc8150f3430f8545e79e0eb39e
|
[] |
no_license
|
mingYi-ch/aml_task4
|
5d24a3107a8ca00083c9c4b71a06fdfc7eb5947b
|
438050dd3f7e09cc34d34f5db65a5ea9b72e8c6d
|
refs/heads/master
| 2022-03-22T19:45:18.707357
| 2019-12-09T14:02:22
| 2019-12-09T14:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
import numpy as np
import biosppy.signals.eeg as eeg
import pandas as pd
def test_eeg(eeg1, eeg2):
# testing plot
x_sample = np.concatenate((eeg1, eeg2), axis = 0)
# transpose to put the signals into column
x_sample = np.transpose(x_sample)
# print theta
# print(x_sample)
# x_sample = np.concatenate((x_sample, x_sample), axis=0)
signal_processed = eeg.eeg(signal=x_sample, sampling_rate=128, show=False)
# # theta
theta = signal_processed[3]
alow = signal_processed[4]
ahigh = signal_processed[5]
beta = signal_processed[6]
gamma = signal_processed[7]
features = np.concatenate((theta, alow, ahigh, beta, gamma), axis=0).ravel()
print(features.shape)
def read_from_file(eeg1, eeg2, nrows = 10):
# read from files
x_train_eeg1 = pd.read_csv(eeg1, index_col='Id', nrows = nrows).to_numpy()
x_train_eeg2 = pd.read_csv(eeg2, index_col='Id', nrows = nrows).to_numpy()
return x_train_eeg1, x_train_eeg2
if __name__ == '__main__':
train_part = read_from_file("train_eeg1.csv", "train_eeg2.csv", 4)
eeg1s = train_part[0]
eeg2s = train_part[1]
# for mat in zip(eeg1s, eeg2s):
# print(mat)
# print(mat[0])
# break
eeg1 = eeg1s[3, :].reshape(1, -1)
    eeg2 = eeg2s[3, :].reshape(1, -1)
# print(eeg1.shape) # size 1 * 512
test_eeg(eeg1, eeg2)
|
[
"myi@student.ethz.ch"
] |
myi@student.ethz.ch
|
823cfd503c40f9c544e77c62fdbcc4bb86ec03d6
|
dd28aede0d492d265e27d491eb46be5bda03e26c
|
/experimento mnist nuevo/experiment.py
|
6fdcd91bdd6ac6aaa2e16f69a3c049eb628d7613
|
[] |
no_license
|
beeva-ricardoguerrero/Floydhub_experiments
|
db008ccb75ca8e7a2ff952440c56076adfe69b2a
|
015b59b7d798e6c468d09ff5dead45edb108750c
|
refs/heads/master
| 2021-01-01T04:13:25.298975
| 2017-07-13T15:53:30
| 2017-07-13T15:53:30
| 97,140,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,925
|
py
|
import json
import logging
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.core.protobuf import meta_graph_pb2
import mnist_model
import mnist
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 100,
'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')
flags.DEFINE_integer('checkpoint', 100, 'Interval steps to save checkpoint.')
flags.DEFINE_string('log_dir', '/tmp/logs',
'Directory to store checkpoints and summary logs')
flags.DEFINE_string('model_dir', '/tmp/model',
'Directory to store trained model')
flags.DEFINE_string('data_dir', '/tmp/data',
'Directory to store training data')
flags.DEFINE_boolean('local_data', False,
                     'If true, don\'t fetch training data from the web')
# Global flags
BATCH_SIZE = FLAGS.batch_size
MODEL_DIR = FLAGS.model_dir
LOG_DIR = FLAGS.log_dir
DATA_DIR = FLAGS.data_dir
LOCAL_DATA = FLAGS.local_data
MAX_STEPS = FLAGS.max_steps
CHECKPOINT = FLAGS.checkpoint
def run_training():
with tf.Graph().as_default() as graph:
# Prepare training data
mnist_data = mnist.read_data_sets(DATA_DIR, one_hot=True,
local_only=LOCAL_DATA)
# Create placeholders
x = tf.placeholder(tf.float32, [None, 784])
t = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32, [])
        global_step = tf.Variable(0, trainable=False) # this variable is unused in this code, but it is kept so as not to break the API
# Add test loss and test accuracy to summary
test_loss = tf.placeholder(tf.float32, [])
test_accuracy = tf.placeholder(tf.float32, [])
tf.summary.scalar('Test_loss', test_loss)
tf.summary.scalar('Test_accuracy', test_accuracy)
# Define a model
p = mnist_model.get_model(x, keep_prob, training=True)
train_step, loss, accuracy = mnist_model.get_trainer(p, t, global_step)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
summary = tf.summary.merge_all()
# Create a supervisor
sv = tf.train.Supervisor(is_chief=True, logdir=LOG_DIR,
init_op=init_op, saver=saver, summary_op=None,
global_step=global_step, save_model_secs=0)
# Create a session and start a training loop
with sv.managed_session() as sess:
reports, step = 0, 0
start_time = time.time()
while not sv.should_stop() and step < MAX_STEPS:
images, labels = mnist_data.train.next_batch(BATCH_SIZE)
feed_dict = {x:images, t:labels, keep_prob:0.5}
_, loss_val, step = sess.run([train_step, loss, global_step], feed_dict=feed_dict)
if step > CHECKPOINT * reports:
reports += 1
logging.info('Step: %d, Train loss: %f', step, loss_val)
# Evaluate the test loss and test accuracy
loss_vals, acc_vals = [], []
for _ in range(len(mnist_data.test.labels) // BATCH_SIZE):
images, labels = mnist_data.test.next_batch(BATCH_SIZE)
feed_dict = {x:images, t:labels, keep_prob:1.0}
loss_val, acc_val = sess.run([loss, accuracy], feed_dict=feed_dict)
loss_vals.append(loss_val)
acc_vals.append(acc_val)
loss_val, acc_val = np.sum(loss_vals), np.mean(acc_vals)
# Save summary
feed_dict = {test_loss:loss_val, test_accuracy:acc_val}
sv.summary_computed(sess, sess.run(summary, feed_dict=feed_dict), step)
sv.summary_writer.flush()
logging.info('Time elapsed: %d', (time.time() - start_time))
logging.info('Step: %d, Test loss: %f, Test accuracy: %f',
step, loss_val, acc_val)
sv.stop()
def main(_):
run_training()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
|
[
"ricardo.guerrero@beeva.com"
] |
ricardo.guerrero@beeva.com
|
8bae87091c7930ff91f08ddc1c7770485a685c56
|
cb27eabebe1a0b53ff16909f832780203cff063e
|
/script/script_template.py
|
25902ee4cb47ae732d3c541f10776ffe5c8abeb3
|
[
"MIT"
] |
permissive
|
yota-p/kaggle_titanic
|
e31dfc3196d038c61336d2cbd814c76e14ace352
|
36d2c53711482195f519d9280abadf0d6afa9a15
|
refs/heads/master
| 2023-03-30T10:30:44.790817
| 2021-03-23T15:06:34
| 2021-03-23T15:06:34
| 343,035,079
| 0
| 0
|
MIT
| 2021-03-23T15:06:35
| 2021-02-28T06:14:06
|
Python
|
UTF-8
|
Python
| false
| false
| 427
|
py
|
import os
import gzip
import base64
from pathlib import Path
from typing import Dict
def main():
# this is base64 encoded source code
file_data: Dict = {file_data}
for path, encoded in file_data.items():
print(path)
path = Path(path)
os.makedirs(str(path.parent), exist_ok=True)
path.write_bytes(gzip.decompress(base64.b64decode(encoded)))
if __name__ == '__main__':
main()
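# Companion sketch (an assumption, not part of the template): each file_data
# entry can be produced by gzip-compressing and base64-encoding a source file:
#   encoded = base64.b64encode(gzip.compress(path.read_bytes())).decode()
# Since this file appears to be filled in via str.format, keep literal braces
# out of any code added here.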
|
[
"930713yh@gmail.com"
] |
930713yh@gmail.com
|
1df3e72e11e7eb14e23c5cb608c39717d22fb745
|
bacd03dbc158458d3766c94655950b2de553089e
|
/dappx/migrations/0003_remove_userprofileinfo_city.py
|
f59a90eda0014ffe6786fa06578bc59de6ca5295
|
[] |
no_license
|
abhifindatution/django
|
bdad14b35da3d55a912b37be1d48cc337d30f5b4
|
7ac66fcbc0a6975c23bda6e8f53d176c2dbcae6d
|
refs/heads/master
| 2023-05-03T21:33:44.124392
| 2019-08-04T11:58:53
| 2019-08-04T11:58:53
| 200,459,562
| 0
| 0
| null | 2023-04-21T20:35:32
| 2019-08-04T06:47:26
|
CSS
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
# Generated by Django 2.1.1 on 2019-05-14 06:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dappx', '0002_auto_20190510_1323'),
]
operations = [
migrations.RemoveField(
model_name='userprofileinfo',
name='city',
),
]
|
[
"rajjo@findatuton.com"
] |
rajjo@findatuton.com
|
d7057217c970f3bd51a646a4638bb77b219d723f
|
e055c9386652a4b2d271e81493f930e2e66515d1
|
/Problems/mixedCase/task.py
|
27342d93de5c0fe991cf2aa0069eaf621f70e2d2
|
[] |
no_license
|
flo62134/hyperskill_python_tic_tac_toe
|
0d8e5bf893f754528bc36443598a58c8feec5872
|
36366860be5ec18835b7702bd26e29e81536de0f
|
refs/heads/master
| 2022-08-02T11:26:56.526051
| 2020-05-21T17:45:32
| 2020-05-21T17:45:32
| 265,679,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
lower = input()
words = lower.split()
upper_words = [word.title() for word in words]
upper_words[0] = upper_words[0].lower()
camel = "".join(upper_words)
print(camel)
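# Example (added note): input "hello brave new world" prints "helloBraveNewWorld".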
|
[
"florentbrassart31@gmail.com"
] |
florentbrassart31@gmail.com
|
7d3a565b843d3a511283b8290b2e3e98f9f02a74
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/soisson.py
|
2f90d49960b18e683a39c2e7e75ccc653b9bb91e
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 178
|
py
|
ii = [('WilbRLW4.py', 1), ('CarlTFR.py', 6), ('CookGHP2.py', 1), ('KiddJAE.py', 1), ('ClarGE.py', 2), ('BuckWGM.py', 2), ('WadeJEB.py', 1), ('GodwWLN.py', 1), ('BuckWGM2.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
d4b3c37168303b568f64ff5fef401bc1cc1264b2
|
3400394303380c2510b17b95839dd4095abc55a4
|
/src/py310/lesson02/comments.py
|
a4dca2ef7c776bd871c81c1adcdd13adb12c2fce
|
[
"MIT"
] |
permissive
|
IBRAR21/py310_sp2021
|
daf53b76decf060d72201a3db66f0f7c697876a7
|
584e37b9d96654c1241fc787d157c292301d5bf7
|
refs/heads/master
| 2023-05-30T16:43:09.614565
| 2021-06-09T21:41:14
| 2021-06-09T21:41:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,828
|
py
|
# --------------------------------------------------------------------------------- #
# AQUABUTTON wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 07 October 2008
# Latest Revision: 24 Nov 2011, 22.00 GMT
#
#
# TODO List
#
# 1) Anything to do?
#
#
# For all kind of problems, requests of enhancements and bug reports, please
# write to me at:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, obviously, to the wxPython mailing list!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac.
Description
===========
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac. At the moment this class supports:
* Bubble and shadow effects;
* Customizable background, foreground and hover colours;
* Rounded-corners buttons;
* Text-only or image+text buttons;
* Pulse effect on gaining focus.
And a lot more. Check the demo for an almost complete review of the functionalities.
Usage
=====
Sample usage::
import wx
import wx.lib.agw.aquabutton as AB
app = wx.App(0)
frame = wx.Frame(None, -1, "AquaButton Test")
mainPanel = wx.Panel(frame)
mainPanel.SetBackgroundColour(wx.WHITE)
# Initialize AquaButton 1 (with image)
bitmap = wx.Bitmap("my_button_bitmap.png", wx.BITMAP_TYPE_PNG)
btn1 = AB.AquaButton(mainPanel, -1, bitmap, "AquaButton")
# Initialize AquaButton 2 (no image)
btn2 = AB.AquaButton(mainPanel, -1, None, "Hello World!")
frame.Show()
app.MainLoop()
Supported Platforms
===================
AquaButton has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (10.10).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
This class processes the following events:
================= ==================================================
Event Name Description
================= ==================================================
``wx.EVT_BUTTON`` Process a `wxEVT_COMMAND_BUTTON_CLICKED` event, when the button is clicked.
================= ==================================================
License And Version
===================
:class:`AquaButton` control is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 22 Nov 2011, 22.00 GMT
Version 0.4
"""
x = x + 1 # allow for border
BORDER = 1
x = x + BORDER
def allow_for_border(coordinate):
return coordinate + 1
y = allow_for_border(y)
def calc(num1, num2):
# calc product 2 numbers
return num1 + num2
def calculate_product(left, right):
return left * right
|
[
"akmiles@icloud.com"
] |
akmiles@icloud.com
|
976828ea55563b1986da76957c19a1fc536486b2
|
6364bb727b623f06f6998941299c49e7fcb1d437
|
/msgraph-cli-extensions/src/userscontacts/azext_userscontacts/vendored_sdks/userscontacts/aio/__init__.py
|
03db4e735a0c8c4b412b41f6a92f232c27276d81
|
[
"MIT"
] |
permissive
|
kanakanaidu/msgraph-cli
|
1d6cd640f4e10f4bdf476d44d12a7c48987b1a97
|
b3b87f40148fb691a4c331f523ca91f8a5cc9224
|
refs/heads/main
| 2022-12-25T08:08:26.716914
| 2020-09-23T14:29:13
| 2020-09-23T14:29:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._users_contacts_async import UsersContacts
__all__ = ['UsersContacts']
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
3c699961c03db0286e4b397de0a722d189504754
|
30e2a85fc560165a16813b0486a862317c7a486a
|
/datastruct_algorithm/jan.py
|
bb5cbcfb654440320b08cce91cc4251879eb8dfd
|
[] |
no_license
|
muryliang/python_prac
|
2f65b6fdb86c3b3a44f0c6452a154cd497eb2d01
|
0301e8f523a2e31e417fd99a968ad8414e9a1e08
|
refs/heads/master
| 2021-01-21T11:03:48.397178
| 2017-09-18T04:13:27
| 2017-09-18T04:13:27
| 68,801,688
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
py
|
import time
import sys
def perform(a, b, goal, failset, trueset):
"""a is limaL, b is limbL, failset is a list failseting action"""
# time.sleep(1)
# print(a, b)
global lima
global limb
res = False
if a == goal or b == goal or a + b == goal:
return True
if res is False and a > 0 and b < limb:
ares = max(a - (limb-b), 0)
bres = min(limb, b + a)
if (ares , bres) not in failset:
failset.append((ares, bres) )
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("rmove")
if res is False and b > 0 and a < lima:
ares = min(lima, a + b)
bres = max(b - (lima-a), 0)
if (ares , bres) not in failset:
failset.append((ares, bres))
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("lmove")
if res is False and b > 0:
ares = a
bres = 0
if (ares , bres) not in failset:
failset.append((ares, bres))
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("drop b")
if res is False and a > 0:
ares = 0
bres = b
if (ares , bres) not in failset:
failset.append((ares, bres))
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("drop a")
if res is False and a < lima:
ares = lima
bres = b
if (ares , bres) not in failset:
failset.append((ares, bres))
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("fill a")
if res is False and b < limb:
ares = a
bres = limb
if (ares , bres) not in failset:
failset.append((ares, bres))
res = perform(ares, bres, goal, failset, trueset)
if res:
trueset.append("fill b")
# if res is False:
# print ("nothing true, return")
return res
failset = [(0,0)]
trueset = list()
lima = int(sys.argv[1])
limb = int(sys.argv[2])
goal = int(sys.argv[3])
if perform(0, 0, goal, failset, trueset):
print ("success")
else:
print ("fail")
print (list(reversed(trueset)))
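# Example invocation (added note): `python jan.py 4 3 2` searches for a way to
# measure 2 units with jugs of capacity 4 and 3, prints "success", and lists
# the move sequence in order.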
|
[
"muryliang@gmail.com"
] |
muryliang@gmail.com
|
119da14a29035eb8a5b1c9ba0c64dc7cb316c170
|
fab39aa4d1317bb43bc11ce39a3bb53295ad92da
|
/nncf/tensorflow/graph/pattern_operations.py
|
23435d263c3de7adf57353e47709a005e220e0df
|
[
"Apache-2.0"
] |
permissive
|
dupeljan/nncf
|
8cdce27f25f01ce8e611f15e1dc3036fb8548d6e
|
0abfd7103ca212888a946ba4d0fbdb9d436fdaff
|
refs/heads/develop
| 2023-06-22T00:10:46.611884
| 2021-07-22T10:32:11
| 2021-07-22T10:32:11
| 388,719,455
| 0
| 0
|
Apache-2.0
| 2021-07-23T07:46:15
| 2021-07-23T07:43:43
| null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.graph.patterns import merge_two_types_of_operations
from nncf.tensorflow.graph.metatypes.common import ELEMENTWISE_LAYER_METATYPES
from nncf.tensorflow.graph.metatypes.common import GENERAL_CONV_LAYER_METATYPES
from nncf.tensorflow.graph.metatypes.common import LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_ONE_INPUT
from nncf.tensorflow.graph.metatypes.common import LINEAR_LAYER_METATYPES
LINEAR_OPERATIONS = {'type': list(
{
*{layer_name for m in GENERAL_CONV_LAYER_METATYPES for layer_name in m.get_all_aliases()},
*{layer_name for m in LINEAR_LAYER_METATYPES for layer_name in m.get_all_aliases()},
}
),
'label': 'LINEAR'
}
ELEMENTWISE_OPERATIONS = {'type': list(set(
layer_name for m in ELEMENTWISE_LAYER_METATYPES for layer_name in m.get_all_aliases()
)),
'label': 'ELEMENTWISE'
}
QUANTIZATION_AGNOSTIC_OPERATIONS = {
'type': list(set(
layer_name for m in LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_ONE_INPUT for layer_name in m.get_all_aliases()
)),
'label': 'ELEMENTWISE'
}
BATCH_NORMALIZATION_OPERATIONS = {'type': ['BatchNormalization',
'SyncBatchNormalization',],
'label': 'BATCH_NORMALIZATION'
}
KERAS_ACTIVATIONS_OPERATIONS = {
'type': ['ReLU',
'ThresholdedReLU',
'ELU',
'PReLU',
'LeakyReLU',
'Activation'],
'label': 'KERAS_ACTIVATIONS'
}
TF_ACTIVATIONS_OPERATIONS = {
'type': ['Relu'],
'label': 'TF_ACTIVATIONS'
}
ATOMIC_ACTIVATIONS_OPERATIONS = merge_two_types_of_operations(KERAS_ACTIVATIONS_OPERATIONS,
TF_ACTIVATIONS_OPERATIONS,
'ATOMIC_ACTIVATIONS')
POOLING_OPERATIONS = {'type': ['AveragePooling2D',
'AveragePooling3D',
'GlobalAveragePooling2D',
'GlobalAveragePooling3D'],
'label': 'POOLING'}
SINGLE_OPS = merge_two_types_of_operations(POOLING_OPERATIONS,
{
'type': [
'Average',
'LayerNormalization',
'UpSampling2D'
]
}, label='SINGLE_OPS')
ARITHMETIC_OPERATIONS = {'type': ['__iadd__',
'__add__',
'__mul__',
'__rmul__'],
'label': 'ARITHMETIC'}
|
[
"noreply@github.com"
] |
noreply@github.com
|
3fb8bf3f113cfb1319ebe26fea72dc1f8a19c78b
|
c592c565b4f9259933738e7801e100f64227175a
|
/questions/migrations/0003_question_title.py
|
f247190d977f18d7fe37040ec80568f25e20ec86
|
[] |
no_license
|
ericak11/questions_app
|
1eb5d3dbd886d2fa0aaab7080a689b8f8b575934
|
85fecce9f4113841a36518fd4c5e5916ff68e964
|
refs/heads/master
| 2016-09-06T01:53:48.477642
| 2015-01-05T15:23:21
| 2015-01-05T15:23:21
| 27,886,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0002_auto_20141211_1832'),
]
operations = [
migrations.AddField(
model_name='question',
name='title',
field=models.CharField(default='exit', max_length=200),
preserve_default=False,
),
]
|
[
"kantor.erica@gmail.com"
] |
kantor.erica@gmail.com
|
2582af6476aaa57b460d1979815cf654b2556508
|
6ab022b549cfd105cedb3acd84679a663b770103
|
/home/migrations/0002_auto_20210123_0010.py
|
5a25e027d92b06540a1c45f6fc30104fb125e3bf
|
[
"Apache-2.0"
] |
permissive
|
d-shaktiranjan/WebifyTask
|
2b358d6b80bbe723f10a8ebabeb0ff4f701a2c53
|
188c9519dd5d9fe406ed7ce4e27dd491240302da
|
refs/heads/main
| 2023-04-23T17:07:38.930584
| 2021-05-18T05:48:10
| 2021-05-18T05:48:10
| 331,989,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
# Generated by Django 3.1.4 on 2021-01-22 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='alltask',
name='id',
),
migrations.AddField(
model_name='alltask',
name='task_id',
field=models.IntegerField(default=1, primary_key=True, serialize=False),
preserve_default=False,
),
migrations.AlterField(
model_name='alltask',
name='about',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='alltask',
name='subDateTime',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='alltask',
name='taskName',
field=models.CharField(max_length=30),
),
]
|
[
"debatashaktiranjan@gmail.com"
] |
debatashaktiranjan@gmail.com
|
582f4dc70b8e50a416444935ec568175df8bd5e6
|
c0f5512aa25f8a3ead1933d1faeaa1593716bc6c
|
/files/Python/10791756-djcasing-4.py
|
e73ce7d93b288a3a1f8842b1249da4844164fa98
|
[] |
no_license
|
harthur/detect-indent
|
89cc56d02257fad57f76c32f57091503932ce7a9
|
c5764cc72a32722adc9f3667f6565c8529a19557
|
refs/heads/master
| 2016-09-05T12:58:59.686824
| 2014-09-08T05:42:39
| 2014-09-08T05:42:39
| 18,378,730
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
import string
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
def case_insensitive(func, case='lower', code=301):
"""
    Django view function decorator which enforces the case of a URL path by
    redirecting to the properly cased URL. This *allows* case-insensitive
    matches while ensuring that only a consistently cased URL is used and seen.
"""
def inner(request, *args, **kwargs):
if case not in ['lower', 'upper']:
raise ValueError("{0} is not a valid case function: use 'lower' or 'upper'".format(case))
if code not in [301, 302]:
raise ValueError("{0} is not a valid HTTP redirect code".format(code))
        redirect_klass = HttpResponsePermanentRedirect if code == 301 else HttpResponseRedirect  # 301 is the permanent redirect
cased_path = getattr(string, case)(request.path)
if request.path != cased_path:
url = cased_path
if 'QUERY_STRING' in request.META:
url = "{0}?{1}".format(url, request.META['QUERY_STRING'])
return redirect_klass(url)
return func(request, *args, **kwargs)
return inner
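# Usage sketch (added illustration; the view name is hypothetical):
#
#   from functools import partial
#
#   @partial(case_insensitive, case='lower', code=301)
#   def article_detail(request, slug):
#       ...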
|
[
"fayearthur@gmail.com"
] |
fayearthur@gmail.com
|
201f20209bdbb0451b07c576336b8ce2de92ec95
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/DataSummary/tags/V00-00-05/src/cspad.py
|
12f8661db596a4ad88019f31c52e224d8b2a0a18
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,160
|
py
|
import os
import psana
import numpy
import logging
import event_process
import pylab
from mpi4py import MPI
from common import strtype
class cspad(event_process.event_process):
def __init__(self):
self.logger = logging.getLogger(__name__+'.cspad')
self.output = event_process.event_process_output()
self.output['in_report'] = None
self.output['in_report_title'] = None
self.frame = None
self.nframes = numpy.array([0])
self.reducer_rank = 0
return
def beginJob(self):
return
def add_frame(self,frame):
        if self.frame is None:
self.frame = numpy.zeros_like(frame,dtype='float64')
self.frame += frame
self.nframes[0] += 1
return
def set_stuff(self,psana_src,psana_device,in_report=None,in_report_title=None):
self.src = psana.Source(psana_src)
self.dev = psana_device
self.output['in_report'] = in_report
self.output['in_report_title'] = in_report_title
def replicate_info(self):
args = ( str(self.src), strtype(self.dev) )
kwargs = { 'in_report': self.output['in_report'], 'in_report_title': self.output['in_report_title'] }
self.logger.info('args: {:}'.format(repr(args)))
self.logger.info('kwargs: {:}'.format(repr(kwargs)))
return ('set_stuff',args,kwargs)
def event(self,evt):
cspad = evt.get(self.dev, self.src)
a = []
for i in range(0,4):
quad = cspad.quads(i)
d = quad.data()
a.append(numpy.vstack([ d[j] for j in range(0,8) ]))
frame_raw = numpy.hstack(a)
self.add_frame(frame_raw)
return
def reduce(self,comm,ranks=[],reducer_rank=None,tag=None):
self.mergedframe = numpy.zeros_like( self.frame, dtype='float64' )
self.mergednframes = numpy.array([0])
if reducer_rank is None and tag is None:
self.mergedframe += self.frame
self.mergednframes[0] += self.nframes[0]
elif reducer_rank == comm.Get_rank() and tag is not None:
for r in ranks:
if r == reducer_rank:
self.mergedframe += self.frame
self.mergednframes[0] += self.nframes[0]
                else:
self.mergedframe += comm.recv( source=r, tag=tag+1 )
self.mergednframes[0] += comm.recv( source=r, tag=tag+2)
elif reducer_rank != comm.Get_rank() and tag is not None:
comm.send( self.frame , dest=reducer_rank, tag=tag+1 ) # replace vals with something appropriate
comm.send( self.nframes, dest=reducer_rank, tag=tag+2 )
return
def endJob(self):
self.logger.info('mpi reducing cspad')
self.reduce(self.parent.comm,ranks=self.reduce_ranks,reducer_rank=self.reducer_rank,tag=66)
if self.parent.rank == self.reducer_rank:
self.output['figures'] = {'mean': {}, 'mean_hist': {}, }
fig = pylab.figure()
self.avg = self.mergedframe/float(self.mergednframes[0])
pylab.imshow(self.avg)
pylab.colorbar()
self.flat = self.avg.flatten()
pylab.clim(self.flat.mean()-2.*self.flat.std(),self.flat.mean()+2.*self.flat.std())
            pylab.title('CSPAD average of {:} frames'.format(self.mergednframes[0]))
pylab.savefig( os.path.join( self.output_dir, 'figure_cspad.png' ))
self.output['figures']['mean']['png'] = os.path.join( self.output_dir, 'figure_cspad.png')
fig.clear()
pylab.hist(self.flat,1000)
pylab.xlim(self.flat.mean()-2.*self.flat.std(),self.flat.mean()+2.*self.flat.std())
pylab.title('histogram')
pylab.savefig( os.path.join( self.output_dir, 'figure_cspad_hist.png' ))
self.output['figures']['mean_hist']['png'] = os.path.join( self.output_dir, 'figure_cspad_hist.png')
del fig
self.parent.output.append(self.output)
return
|
[
"justing@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
justing@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
4853fa85b7e28aec71a8c26dce4015d65e359101
|
401ad378a3c5cf2faaa692680dba29726c7193d5
|
/preprocess_data.py
|
ba74d189f25e56c88448b036e1f9eb406d51dff5
|
[] |
no_license
|
thaophung/ASL-recognition-Hololens-
|
75293a214dbf067ec799b5b0852dfcca840714b0
|
e90c546a6f27838be29e7f173baf685ef090dc92
|
refs/heads/master
| 2021-09-14T00:56:06.164850
| 2018-05-06T20:30:52
| 2018-05-06T20:30:52
| 113,247,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,202
|
py
|
import numpy as np
import scipy.misc
import os, cv2, random
import shutil
def combine_list_txt(data_path):
trainlisttxt = 'trainlist.txt'
vallisttxt = 'vallist.txt'
#testlisttxt = 'testlist.txt'
trainlist = []
txt_path = os.path.join(data_path, trainlisttxt)
with open(txt_path) as fo:
for line in fo:
trainlist.append(line[:line.rfind(' ')])
vallist = []
txt_path = os.path.join(data_path, vallisttxt)
with open(txt_path) as fo:
for line in fo:
vallist.append(line[:line.rfind(' ')])
return trainlist, vallist
def regenerate_data(data_path):
sequence_length = 10
image_size = (224,224,3)
dest_dir = os.path.join(data_path, 'npy_dataset_2')
# generate sequence for optical flow
preprocessing(data_path, dest_dir, sequence_length, image_size, overwrite=True,
normalization=False, mean_subtraction=False, horizontal_flip=False,
random_crop=False, consistent=False, continuous_seq=True)
# compute optical flow data
def preprocessing(data_path, dest_dir, seq_len, img_size, overwrite=False,
normalization=False, mean_subtraction=False, horizontal_flip=False,
random_crop=False, consistent=False, continuous_seq=True):
'''
    Extract each video to a frame sequence of fixed length, and save it as an npy file
    :param data_path: root directory holding the train/val data and the list files
    :param dest_dir: destination directory for the npy dataset
    :param seq_len: number of frames per sequence
    :param img_size: target frame size
    :param overwrite: whether to overwrite existing output
    :param normalization: normalize pixel values to (0,1)
    :param mean_subtraction: subtract the mean of the RGB channels
    :param horizontal_flip: randomly flip frames horizontally
    :param random_crop: crop frames at a random location
    :param consistent: whether the horizontal flip / random crop choices are consistent within a sequence
    :param continuous_seq: whether the extracted frames are consecutive
:return:
'''
    # write a txt file to keep parameter information
txt_file = os.path.join(dest_dir,'parameters.txt')
with open(txt_file,'w') as fo:
fo.write('seq_len: ' + str(seq_len) +
'\noverwrite: ' + str(overwrite) +
'\nnormalization: ' + str(normalization) +
'\nmean_subtraction: ' + str(mean_subtraction) +
'\nhorizontal_flip: ' + str(horizontal_flip) +
'\nrandom_crop: ' + str(random_crop) +
'\nconsistent: ' + str(consistent) +
'\ncontinuous_seq: ' + str(continuous_seq))
trainlist, vallist = combine_list_txt(data_path)
train_src = os.path.join(data_path, 'train')
val_src = os.path.join(data_path, 'val')
train_dir = os.path.join(dest_dir, 'train')
val_dir = os.path.join(dest_dir, 'val')
#os.mkdir(train_dir)
#os.mkdir(val_dir)
if mean_subtraction:
mean = calc_mean(UCF_dir, img_size).astype(dtype='float16')
np.save(os.path.join(dest_dir, 'mean.npy'), mean)
else:
mean = None
print('Preprocessing ASL data ....')
for clip_list, sub_dir in [(trainlist, train_dir)]: #, (vallist, val_dir)]:
for clip in clip_list:
clip_name = os.path.basename(clip)
clip_category = os.path.dirname(clip)
category_dir = os.path.join(sub_dir, clip_category)
if sub_dir == train_dir:
src_dir = os.path.join(train_src, clip)
else:
src_dir = os.path.join(val_src, clip)
dst_dir = os.path.join(category_dir, clip_name)
if not os.path.exists(category_dir):
os.mkdir(category_dir)
process_clip(clip_category, src_dir, dst_dir, seq_len, img_size, mean=mean,
normalization=normalization, horizontal_flip=horizontal_flip,
random_crop=random_crop, consistent=consistent,
continuous_seq=continuous_seq)
print("Processing done...")
# down sample image resolution to 216*216, and make sequence length 10
def process_clip(clip_category, src_dir, dst_dir, seq_len, img_size, mean=False, normalization=False,
horizontal_flip=False, random_crop=False, consistent=False, continuous_seq=False):
all_frames = []
cap = cv2.VideoCapture(src_dir)
while cap.isOpened():
succ, frame = cap.read()
if not succ:
break
# append frame that is not all zeros
if frame.any():
all_frames.append(frame)
clip_length = len(all_frames)
# save all frames
if seq_len is None or clip_length <= 10 or clip_category =='j' or clip_category == 'z':
#print('normal ' + src_dir)
print(src_dir)
all_frames = np.stack(all_frames, axis=0)
dst_dir = os.path.splitext(dst_dir)[0] + '.npy'
np.save(dst_dir, all_frames)
else:
step_size = int(clip_length / (seq_len))
frame_sequence = []
        # select a random first-frame index for a continuous sequence
if continuous_seq:
start_index = random.randrange(clip_length-seq_len + 1)
# choose whether to flip or not for all frames
if not horizontal_flip:
flip = False
elif horizontal_flip and consistent:
flip = random.randrange(2) == 1
if not random_crop:
x, y = None, None
xy_set = False
for i in range(seq_len):
if continuous_seq:
index = start_index + i
else:
index = i * step_size + random.randrange(step_size)
frame = all_frames[index]
# compute flip for each frame
if horizontal_flip and not consistent:
flip = random.randrange(2) == 1
if random_crop and consistent and not xy_set:
x = random.randrange(frame.shape[0] - img_size[0])
y = random.randrange(frame.shape[1] - img_size[1])
xy_set = True
elif random_crop and not consistent:
x = random.randrange(frame.shape[0] - img_size[0])
y = random.randrange(frame.shape[1] - img_size[1])
frame = process_frame(frame, img_size, x, y, mean=mean,
normalization=normalization, flip=flip,
random_crop=random_crop)
frame_sequence.append(frame)
frame_sequence = np.stack(frame_sequence, axis=0)
        dst_dir = os.path.splitext(dst_dir)[0] + '.npy'
np.save(dst_dir, frame_sequence)
cap.release()
def process_frame(frame, img_size, x, y, mean=None, normalization=True, flip=True,
random_crop=False):
if not random_crop:
frame = scipy.misc.imresize(frame, img_size)
else:
frame = frame[x:x+img_size[0], y:y+img_size[1],:]
# flip horizontally
if flip:
frame = frame[:, ::-1, :]
frame = frame.astype(dtype='float16')
if mean is not None:
frame -=mean
if normalization:
frame /= 255
return frame
if __name__ == '__main__':
'''
extract frames from videos as npy files
'''
#sequence_length = None
#image_size = (216,216,3)
data_path = '/Users/thaophung/workspace/senior_design/dataset'
regenerate_data(data_path)
|
[
"noreply@github.com"
] |
noreply@github.com
|
a1be5212f3088beec374525a35b087342ba34213
|
f269ca79b405a05a7a5b715b0fb6a416e78ddbea
|
/process_SimRNA_results.py
|
3b932be33b95ca9350a81644b5876fa245427cbb
|
[] |
no_license
|
fryzjergda/simrna_scripts
|
ccccc3aa5811d2925aa29e765a30ec4f4add7f93
|
f6f81c586046d12d74ed592c7a5bbe9f3eed8178
|
refs/heads/master
| 2020-05-25T05:27:26.603044
| 2020-05-16T11:14:57
| 2020-05-16T11:14:57
| 187,649,955
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,882
|
py
|
#!/usr/bin/python
#created, Michal Boniecki, for automatic processing results from SimRNA runs, 2015.12.14
# this script processes output data from SimRNA runs
import sys, os, shutil
from glob import glob
if(len(sys.argv) < 2):
print >>sys.stderr, "usage: process_SimRNA_results.py job_id_name"
print >>sys.stderr, "note: job_id_name should be legal string, to be used to set up working directory for this job"
print >>sys.stderr, "note: job_id_name should be the same, as used in the script that launches simulations"
sys.exit(1)
WORKING_DIR = "WORKING_SPACE"
RUN_MASK = "run_??"
PROCESSING_DIR = "processing_results"
JOB_ID_NAME = sys.argv[1]
JOB_PATH = WORKING_DIR+"/"+JOB_ID_NAME
ALL_TRAFL_filename = JOB_ID_NAME+"_ALL.trafl"
PDB_REFERENCE_FILE_FOR_TRAFL_CONVERSION = JOB_ID_NAME+"_run_01_01-000001.pdb"
PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION = "../run_01/"+PDB_REFERENCE_FILE_FOR_TRAFL_CONVERSION
ALL_TRAFL_low_size_thrs = 1024 # 1kb for the time being
FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER = 0.01
OUTPUT_PDBS_DIR = "output_PDBS"
if(os.path.exists(JOB_PATH) == False):
print >>sys.stderr, "expected path: "+JOB_PATH+" doesn't exist"
print >>sys.stderr, "this path should exist for given job_id_name: "+JOB_ID_NAME
sys.exit(2)
dir_list = glob(JOB_PATH+"/"+RUN_MASK)
dir_list.sort()
#print dir_list
n_runs = len(dir_list)
if(n_runs == 0):
print >>sys.stderr, "inside directory: "+JOB_PATH+" there are no expected directories named: "+RUN_MASK+" where ?? is numbering field: 01, 02, 03 ..."
sys.exit(3)
print >>sys.stderr, "number of run directories detected: "+str(n_runs)
os.chdir(JOB_PATH)
print >>sys.stderr, "making directory: "+JOB_PATH+"/"+PROCESSING_DIR
if(os.path.exists(PROCESSING_DIR) == False):
os.mkdir(PROCESSING_DIR)
else:
print >>sys.stderr, "requested subdirectory: "+PROCESSING_DIR+" already exists"
print >>sys.stderr, "check it, maybe delete it, ... program termination"
sys.exit(4)
os.chdir(PROCESSING_DIR)
command = "cat ../"+RUN_MASK+"/*.trafl > "+ALL_TRAFL_filename
print >>sys.stderr, "being in "+JOB_PATH+"/"+PROCESSING_DIR+" running command:"
print >>sys.stderr, command
os.system(command)
if(os.path.isfile(ALL_TRAFL_filename) == False):
print >>sys.stderr, "expected (from previous step) file: "+ALL_TRAFL_filename+" doesn't exist"
sys.exit(5)
file_size = os.path.getsize(ALL_TRAFL_filename)
if(file_size < ALL_TRAFL_low_size_thrs):
print >>sys.stderr, "file: "+ALL_TRAFL_filename+" is too small: "+str(file_size)+" bytes"
print >>sys.stderr, "it seems the file contains no data, something went wrong before ..."
sys.exit(6)
# some tests if ALL_TRAFL_filename is correct (sometimes there are problems, when there is no disk space during SimRNA running)
# if ALL_TRAFL_filename is not correct, it should be repaired here
inpfile = open(ALL_TRAFL_filename)
first_line = inpfile.readline()
second_line = inpfile.readline().rstrip()
inpfile.close()
# assuming that the second line in file ALL_TRAFL_filename is the first line containing coordinates, it is possible to calculate the size of the system (seq length)
# by dividing the number of items by 15 (3 coordinates x,y,z and 5 atoms per nucleotide)
coords_list = second_line.split()
seq_length = len(coords_list) / 15
if(seq_length < 4):
print >>sys.stderr, "it seems that seq_lenght detected from file: "+ALL_TRAFL_filename+" is too low"
print >>sys.stderr, "something went wrong"
sys.exit(7)
print >>sys.stderr, "clustering ... assuming:"
print >>sys.stderr, "--- fraction of lowest energy frames to clustering: "+str(FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER)
rmsd_thrs = 0.1*float(seq_length)
rmsd_thrs_str = "%.1f" % rmsd_thrs
print >>sys.stderr, "--- rmsd thrs for clustering 0.1*seq_lenght which is: "+rmsd_thrs_str
command = "../../../bin/clustering "+ALL_TRAFL_filename+" "+str(FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER)+" "+rmsd_thrs_str+" > clustering.log 2>&1"
print >>sys.stderr, command
os.system(command)
clust_1_2_3_names = glob("*clust0[1-3].trafl")
clust_1_2_3_names.sort()
n_clusts = len(clust_1_2_3_names)
print >>sys.stderr, "number of clusters to process: "+str(n_clusts)
print >>sys.stderr, "extracting pdbs, reconstructing all atom representation"
print >>sys.stderr, "creating symlink 'data'"
os.symlink("../../../data","data")
if(os.path.exists(PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION) == False):
print >>sys.stderr, "expected file at location: "+PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION+" doesn't exist"
sys.exit(8)
for curr_clust_name in clust_1_2_3_names:
clust_reconstr_log_name = curr_clust_name.replace(".trafl",".log")
command = "../../../bin/SimRNA_trafl2pdbs "+PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION+" "+curr_clust_name+" 1 AA > "+clust_reconstr_log_name+" 2>&1"
print >>sys.stderr, command
os.system(command)
os.chdir("..")
print >>sys.stderr, "making directory: "+OUTPUT_PDBS_DIR
if(os.path.exists(OUTPUT_PDBS_DIR) == False):
os.mkdir(OUTPUT_PDBS_DIR)
else:
print >>sys.stderr, "directory already exists"
os.chdir(PROCESSING_DIR)
pdbs_list = glob("*.pdb")
#pdbs_AA_list = glob("*_AA.pdb")
ss_detected_list = glob("*.ss_detected")
if(len(pdbs_list) == 0):
    print >>sys.stderr, "inside directory: "+PROCESSING_DIR+" there are no pdb files, something went wrong in previous step"
sys.exit(9)
else:
pdbs_list.sort()
print >>sys.stderr, "detected pdb files in: "+PROCESSING_DIR+":"
for curr_pdb_name in pdbs_list:
print >>sys.stderr, curr_pdb_name
print >>sys.stderr, "copying pdb and ss_detected files to: "+OUTPUT_PDBS_DIR+" just to store them there"
for curr_pdb_name in pdbs_list:
shutil.copy(curr_pdb_name, "../"+OUTPUT_PDBS_DIR)
for curr_ss_detected_name in ss_detected_list:
shutil.copy(curr_ss_detected_name, "../"+OUTPUT_PDBS_DIR)
os.chdir("..")
print >>sys.stderr, "DONE :-)"
|
[
"twirecki@genesilico.pl"
] |
twirecki@genesilico.pl
|
44e327bc73b3bedf3e3fae4de740348f8067cb99
|
671a669cc862f68d736a98b3d95bedf96cd7b09e
|
/Coreference/PDT/pdt_clusterization.py
|
b66174d38841135639be5bd2405124be4e692b1d
|
[] |
no_license
|
Jankus1994/Coreference
|
e258b68c0a75ee3102614220f27c5d163e745c41
|
41b13ce6422ac6c3d139474641e75e502c446162
|
refs/heads/master
| 2021-01-23T01:55:56.732336
| 2018-05-03T18:06:40
| 2018-05-03T18:06:40
| 85,945,883
| 0
| 1
| null | 2017-03-23T13:08:14
| 2017-03-23T12:15:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,610
|
py
|
# Jan Faryad
# 23. 3. 2017
#
# pdt module for conversion of the coreference information from chains to clusters
class PDT_clusterization:
def __init__( self, list_of_corefs, init_cluster_id):
self.list_of_corefs = list_of_corefs
self.list_of_cluster_tuples = []
self.cluster_id = init_cluster_id # id of coreference clusters
def convert_chains_to_clusters( self):
"""
called from outside
"""
for record in self.list_of_corefs:
cluster_ID = self.find_coref_cluster( record.coref_ID)
            if ( cluster_ID is None ):
                # only for heads of the chains - their referents must be processed separately, as they don't have their own records
cluster_ID = self.new_cluster()
coref_cluster_record = (
record.coref_ID[0], record.coref_ID[1], record.coref_ID[2], cluster_ID, record.coref_dropped)
# paragraph ID, sentence ID, word ID
self.list_of_cluster_tuples.append( coref_cluster_record)
own_cluster_record = (
record.own_ID[0], record.own_ID[1], record.own_ID[2], cluster_ID, record.own_dropped)#, record.perspron)
# paragraph ID, sentence ID, word ID
self.list_of_cluster_tuples.append( own_cluster_record)
self.list_of_cluster_tuples = sorted( self.list_of_cluster_tuples)
list_of_cluster_records = [] # building object-records from tuples
for tuple in self.list_of_cluster_tuples:
cluster_record = Cluster_record( tuple)
list_of_cluster_records.append( cluster_record)
return list_of_cluster_records
def find_coref_cluster( self, ids): # -> int (cluster number)
"""
if this cluster was already used, returns its number. otherwise None - then will the caller set up a new cluster
"""
for tuple in self.list_of_cluster_tuples:
if ( ( tuple[0], tuple[1], tuple[2] ) == ids ):
return tuple[3]
return None
def new_cluster( self):
self.cluster_id += 1
return self.cluster_id
def get_cluster_id( self):
return self.cluster_id
class Cluster_record:
def __init__( self, tuple):
self.para_ID = tuple[0]
self.sent_ID = tuple[1]
self.word_ID = tuple[2]
self.cluster_ID = tuple[3]
self.dropped = tuple[4]
#self.perspron
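# --- Usage sketch (illustrative addition, not part of the original module).
# The record objects consumed by convert_chains_to_clusters are assumed to
# expose coref_ID and own_ID (each a (paragraph, sentence, word) triple) plus
# coref_dropped and own_dropped flags; the Coref_record stub below is
# hypothetical and stands in for the real PDT record class.
if __name__ == "__main__":
    class Coref_record:
        def __init__(self, coref_ID, own_ID, coref_dropped=False, own_dropped=False):
            self.coref_ID = coref_ID
            self.own_ID = own_ID
            self.coref_dropped = coref_dropped
            self.own_dropped = own_dropped

    # one two-mention chain: the word at (1, 1, 4) refers back to (1, 1, 2)
    records = [Coref_record((1, 1, 2), (1, 1, 4))]
    clusterizer = PDT_clusterization(records, init_cluster_id=0)
    for rec in clusterizer.convert_chains_to_clusters():
        print(rec.para_ID, rec.sent_ID, rec.word_ID, rec.cluster_ID)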
|
[
"noreply@github.com"
] |
noreply@github.com
|
e5fef46757ac950b98d7bcd278ede4619963d734
|
11baa71016e83145d00f4b35316c77d40c205b6f
|
/lib/GA.py
|
ec1dbb10445469fc91ce3a859b60e4f2f8daefdb
|
[] |
no_license
|
Syndorik/Genetic-Algorithm
|
5b61b5fea39e8042c66f669d7b24fbf3b9c8e43e
|
bb8899cba83489576ef61da1d5284ddea4a9a2a4
|
refs/heads/master
| 2020-04-25T16:53:02.096595
| 2019-03-10T19:34:36
| 2019-03-10T19:34:36
| 172,927,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,215
|
py
|
import random
import copy
import os
import time
import math
import csv
from Tree import Tree
from TreePop import TreePop
from joblib import Parallel, delayed
import joblib
"""
try:
from tkinter import *
from tkinter.ttk import *
except Exception as e:
print("[ERROR]: {0}".format(e))
from Tkinter import *
"""
class GA:
def __init__(self,list_files,k_mut_prob = 0.4, k_crossover = 3, tournament_size=7, elitism =True, method = "swap"):
self.k_mut_prob = k_mut_prob
self.tournament_size = tournament_size
self.elitism = elitism
self.list_files = list_files
self.k_crossover = k_crossover
self.nodeNum = list_files[0][0][0]
self.method = method
@staticmethod
def swap(tree, mut_pos1, mut_pos2):
tmp_tree = copy.deepcopy(tree)
# if they're the same, skip to the chase
if mut_pos1 == mut_pos2:
return tmp_tree
# Otherwise swap them:
hub1 = tmp_tree.prufer[mut_pos1]
hub2 = tmp_tree.prufer[mut_pos2]
tmp_tree.prufer[mut_pos2] = hub1
tmp_tree.prufer[mut_pos1] = hub2
return tmp_tree
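# Illustrative comment (not in the original): with prufer = [5, 0, 3, 2],
# swapping positions 0 and 2 yields [3, 0, 5, 2].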
@staticmethod
def swap_2opt(tree, mut_pos1, mut_pos2):
tmp_tree = copy.deepcopy(tree)
# if they're the same, skip to the chase
if mut_pos1 == mut_pos2:
return tmp_tree
if(mut_pos1>mut_pos2):
tmp = mut_pos1
mut_pos1 = mut_pos2
mut_pos2 = tmp
fp = [tmp_tree.prufer[l] for l in range(mut_pos1)]
lp = [tmp_tree.prufer[l] for l in range(mut_pos2+1, len(tmp_tree.prufer))]
middle = [tmp_tree.prufer[l] for l in range(mut_pos2,mut_pos1-1,-1)]
tmp_tree.prufer = fp+middle+lp
return tmp_tree
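# Worked example (illustrative comment, not in the original): with
# prufer = [0, 1, 2, 3, 4] and cut points 1 and 3, the segment between the
# two indices is reversed, giving [0, 3, 2, 1, 4] - the classic 2-opt move.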
def tournament_select(self, population):
'''
TreePop() --> Tree(),TreePop()
Randomly selects tournament_size Tree() instances from the input population.
Takes the fittest from this smaller group of Tree().
Principle: gives worse Tree() a chance of succeeding, but favours good Tree()
The first element of the returned tuple is the Tree() with the best fitness; we call this func over and over until we have a new population
'''
# New smaller population (not intialised)
tournament_pop = TreePop(self.tournament_size,self.list_files)
# fills it with random individuals (can choose same twice)
for i in range(self.tournament_size-1):
tournament_pop.tree_pop.append(random.choice(population.tree_pop))
# returns the fittest:
return tournament_pop.get_fittest(),tournament_pop
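# Illustrative comment (not in the original): with tournament_size = 7, the
# candidates are drawn uniformly at random (with replacement) from the
# population and the fittest of them becomes a parent; larger tournaments
# increase selection pressure.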
def crossover_kpoint(self, parent1, parent2):
"""
Same as crossover_random. But this time there are k points and not just a start_pos and end_pos
"""
# new child
child_Tree = Tree(self.list_files, prufer = [None for k in range(self.nodeNum-2)])
#k_crossover random point
k_rd_point = []
while(len(k_rd_point) != self.k_crossover):
tmp = random.randint(0,len(parent1.prufer))
if tmp not in k_rd_point:
k_rd_point.append(tmp)
k_rd_point.sort()
start = 0
cpt = 0
parent_to_choose = [parent1, parent2]
#Creating the child prufer sequence. If k_crossover = 3, we have [parent1,parent2,parent1]
for end in k_rd_point:
for i in range(start,end):
child_Tree.prufer[i] = parent_to_choose[cpt%2].prufer[i]
start = end
cpt+=1
#Replace the remaining None entries with the parent whose turn comes last
for i in range(len(parent2.prufer)):
# complete the prufer sequence with parent2
if child_Tree.prufer[i] == None :
child_Tree.prufer[i] = parent_to_choose[cpt%2].prufer[i]
child_Tree.calc_fit()
return child_Tree
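# Illustrative comment (not in the original): with k_crossover = 3 and cut
# points {2, 5, 8} on length-10 prufer sequences, the child takes indices
# [0:2) from parent1, [2:5) from parent2, [5:8) from parent1, and the
# remaining tail from parent2 - an alternating k-point crossover.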
def crossover_random(self, parent1, parent2):
'''
Tree(), Tree() --> Tree()
Returns a child tree Tree() after breeding the two parent Tree.
Trees must be of same length.
Breeding is done by selecting a random range of parent1, and placing it into the empty child prufer sequence (in the same place).
Gaps are then filled with parent2's values at the same positions (duplicates are allowed in a prufer sequence).
For example:
parent1: 0123456789
parent2: 5487961320
start_pos = 0
end_pos = 5
unfilled child: 01234*****
filled child: 0123461320
* = None
'''
# new child
child_Tree = Tree(self.list_files, prufer = [None for k in range(self.nodeNum-2)])
# Two random integer indices of the parent1:
start_pos = random.randint(0,len(parent1.prufer))
end_pos = random.randint(0,len(parent1.prufer))
#### takes the sub-route from parent one and sticks it in itself:
# if the start position is before the end:
if start_pos < end_pos:
# do it in the start-->end order
for x in range(start_pos,end_pos):
child_Tree.prufer[x] = parent1.prufer[x] # set the values to eachother
# if the start position is after the end:
elif start_pos > end_pos:
# do it in the end-->start order
for i in range(end_pos,start_pos):
child_Tree.prufer[i] = parent1.prufer[i] # set the values to eachother
# For the None values, replace it with parent2
for i in range(len(parent2.prufer)):
# complete the prufer sequence with parent2
if child_Tree.prufer[i] == None :
child_Tree.prufer[i] = parent2.prufer[i]
# returns the child route (of type Route())
child_Tree.calc_fit()
return child_Tree
def mutate(self, tree_mut):
'''
Tree() --> Tree()
Swaps two random indexes in the child's prufer sequence.
Runs k_mut_prob*100 % of the time
'''
tmp_tree = copy.deepcopy(tree_mut)
# k_mut_prob %
if random.random() < self.k_mut_prob:
# two random indices:
mut_pos1 = random.randint(0,len(tmp_tree.prufer)-1)
mut_pos2 = random.randint(0,len(tmp_tree.prufer)-1)
print("ind1 : {}".format(mut_pos1))
print("ind2 : {}".format(mut_pos2))
tmp_tree = GA.swap(tmp_tree,mut_pos1,mut_pos2)
# Recalculate the length of the route (updates it's .length)
tmp_tree.calc_fit()
return tmp_tree
def mutate_swap(self,tree_mut):
'''
Tree() --> Tree()
Scans index pairs of the tree's prufer sequence and applies a swap only if the fitness
after the swap is lower (first-improvement local search).
This method allows us to have a good local search on solutions
Runs k_mut_prob*100 % of the time
'''
tree = copy.deepcopy(tree_mut)
tree.calc_fit()
# k_mut_prob %
breakk = False
lenn = len(tree.prufer)
if random.random() < self.k_mut_prob:
for i in range(lenn):
for j in range(lenn): # i is a, i + 1 is b, j is c, j+1 is d
tmp_tree = GA.swap(tree,i,j)
tmp_tree.calc_fit()
if(tree.fitness > tmp_tree.fitness):
tree = tmp_tree
breakk = True
break
if breakk:
breakk = False
break
tree.calc_fit()
return tree
def mutate_swap_nerfed(self,tree_mut):
'''
Tree() --> Tree()
Like mutate_swap, but only a random subset of indices is scanned; if no improving swap is
found, the best candidate evaluated so far is kept.
This method allows us to have a good local search on solutions
Runs k_mut_prob*100 % of the time
'''
tree = copy.deepcopy(tree_mut)
tree.calc_fit()
# k_mut_prob %
breakk = False
lenn = len(tree.prufer)
if random.random() < self.k_mut_prob:
list_indices = list(set(random.choices(list(range(lenn)), k = int(self.nodeNum/3))))
allposs = []
for i in list_indices:
for j in range(lenn):
tmp_tree = GA.swap(tree,i,j)
tmp_tree.calc_fit()
allposs.append(tmp_tree)
if(tree.fitness > tmp_tree.fitness):
tree = tmp_tree
breakk = True
break
if breakk:
breakk = False
break
if not breakk:
tree = sorted(allposs, key=lambda x: x.fitness, reverse=False)[0]
tree.calc_fit()
return tree
def fittest_swap(self,tree_mut):
'''
Tree() --> Tree()
Exhaustively evaluates, in parallel, every swap partner for each index of the tree's
prufer sequence and keeps the fittest result after each pass.
This method allows us to have a good local search on solutions
This is Local Search at the end of each generation
Runs k_mut_prob*100 % of the time
'''
tree = copy.deepcopy(tree_mut)
tree.calc_fit()
lenn = len(tree.prufer)
def toparall(i,j):
tmp_tree = GA.swap(tree,i,j)
tmp_tree.calc_fit()
return tmp_tree
if random.random() < self.k_mut_prob:
for i in range(lenn):
possibilities = joblib.Parallel(n_jobs=-1)(delayed(toparall)(i,j) for j in range(lenn))
tmplist = sorted(possibilities, key=lambda x: x.fitness, reverse=False)
tree = tmplist[0]
return tree
def mutate_2opt(self, tree_mut):
'''
Tree() --> Tree()
Does a 2-opt swap. We keep the best swap among the possible 2-opt swaps; since the identity swap is among the candidates, the fitness can only improve or stay equal.
This method allows us to have a good local search on solutions
Runs k_mut_prob*100 % of the time
'''
tree = copy.deepcopy(tree_mut)
tree.calc_fit()
# k_mut_prob %
lenn = len(tree.prufer)
def parallel(i,j):
tmp_tree = GA.swap_2opt(tree,i,j)
tmp_tree.calc_fit()
return tmp_tree
if random.random() < self.k_mut_prob:
list_indices = list(set(random.choices(list(range(lenn)), k = int(self.nodeNum/4))))
allposs =[]
for i in list_indices:
possibilities = joblib.Parallel(n_jobs=-1)(delayed(parallel)(i,j) for j in range(lenn))
tmplist = sorted(possibilities, key=lambda x: x.fitness, reverse=False)
allposs.append(tmplist[0])
tmplist = sorted(allposs, key=lambda x: x.fitness, reverse=False)
tree = tmplist[0]
tree.calc_fit()
return tree
def change_three_bests(self, population):
"""
TreePop() --> TreePop()
Improve the best trees (in terms of fitness) with an exhaustive swap local search; despite the method name, the loop below processes up to five distinct trees. We're testing every swap possible.
### @TODO do it until there are 3 different trees
"""
population.sort_treepop()
#for k in range(3):
# population.tree_pop[k] = self.fittest_swap(population.tree_pop[k])
cpt = 0
k = 0
lenn = len(population.tree_pop)
done = []
while(cpt< 5 and k< lenn):
print(k)
population.tree_pop[k] = self.fittest_swap(population.tree_pop[k])
if(population.tree_pop[k] not in done):
cpt+=1
done.append(population.tree_pop[k])
k+=1
return population
def evolve_population(self, init_pop):
'''
TreePop() --> TreePop()
Takes a population and evolves it then returns the new population.
'''
#makes a new population:
descendant_pop = TreePop(list_files = self.list_files, size=init_pop.size, initialise=True)
# Elitism offset (amount of Tree() carried over to new population)
elitismOffset = 0
# if we have elitism, set the first of the new population to the fittest of the old
if self.elitism:
descendant_pop.tree_pop[0] = init_pop.fittest
elitismOffset = 1
# Goes through the new population and fills it with the child of two tournament winners from the previous population
for x in range(elitismOffset,descendant_pop.size):
# two parents:
tournament_parent1 = self.tournament_select(init_pop)[0]
tournament_parent2 = self.tournament_select(init_pop)[0]
while(tournament_parent2 == tournament_parent1):
tournament_parent2 = self.tournament_select(init_pop)[0]
# A child:
tournament_child = self.crossover_kpoint(tournament_parent1, tournament_parent2)
# Fill the population up with children
descendant_pop.tree_pop[x] = tournament_child
# Mutates all the Tree (mutation with happen with a prob p = k_mut_prob)
# tre_ind in range(len(descendant_pop.tree_pop)):
# descendant_pop.tree_pop[tre_ind] = self.mutate_2opt_nerfed(descendant_pop.tree_pop[tre_ind])
if(self.method == "swap"):
func = self.mutate_swap_nerfed
elif (self.method == "opt"):
func = self.mutate_2opt
else:
raise ValueError("unknown mutation method: " + str(self.method))
tmp = Parallel(n_jobs=-1)(delayed(func)(descendant_pop.tree_pop[tre_ind]) for tre_ind in range(len(descendant_pop.tree_pop)))
descendant_pop.tree_pop = tmp
# Update the fittest Tree:
#descendant_pop.sort_treepop()
#descendant_pop.tree_pop[0] = self.fittest_2opt(descendant_pop.tree_pop[0])
descendant_pop.get_fittest()
return descendant_pop
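# --- Driving-loop sketch (illustrative comments, an addition; TreePop
# construction arguments are assumed to follow the call in evolve_population):
# pop = TreePop(list_files=list_files, size=50, initialise=True)
# ga = GA(list_files, method="swap")
# for generation in range(100):
#     pop = ga.evolve_population(pop)
# best = pop.get_fittest()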
|
[
"alexandre.allani@telecom-bretagne.eu"
] |
alexandre.allani@telecom-bretagne.eu
|
c8f2954d5848b31990dc4b5336d0ef62f6f1881e
|
1bf03446a30dbc51c0249c41a7b457581725dfed
|
/client.py
|
23473e0b776bb2dc84bf59cf407825c12e3dbbe1
|
[] |
no_license
|
ichbinkenny/redefined-destruction-networking
|
567ca8ee643be53154d7f1072c95c70ca375b078
|
fc92f4957e3734226324dd1678d0c9306c80771c
|
refs/heads/master
| 2023-01-31T12:35:11.238835
| 2020-12-09T21:48:02
| 2020-12-09T21:48:02
| 273,066,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,725
|
py
|
import socket
import threading
import sys
import select
client_socket = None
address = "asimplenerd.com"#"192.168.72.1" # This will be the server's address on the local network. It is imperative that the client is already on the hostapd network!
default_port = 1287
id = -1
end_flag = True
close_message = "PLZCLOSENOW"
ACK = 'e'
NACK = 'f'
game_in_progress = False
message_size = 256
READY = 1
BUSY = 2
DEV_ADDED = 3
DEV_REMOVED = 4
ENTER_COMBAT = 5
EXIT_COMBAT = 6
### setupClient test notes
# Clients trying to connect to the server improperly, i.e. on server close all terminate as appropriate.
# Received IDs are not used until verified by an ACK from the server!
# any non-ack status from the server simply disconnects this client! This is good!
# ACKs from client IDs allow for a bot connection and start a game loop for the client to receive messages
# and send updates!
def setupClient():
global client_socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Attempting to connect to server...")
client_socket.connect((address, default_port))
id = int.from_bytes(client_socket.recv(message_size), byteorder='big')
status = client_socket.recv(message_size).decode('utf-8')
if status == ACK:
# Send id back to bluetooth device
sys.stdout.write("id: %d\n" % id)
sys.stdout.flush()
beginConnLoop()
elif status == NACK:
print("Failed to get id!")
else:
print("UNKNOWN STATUS: {}".format(str(status)))
### readDevUpdates test notes
# until this client is instructed to stop running, updates are being read from BluetoothControl correctly.
# on client termination, this function cleans up its resources nicely!
# all four handled commands are properly recognized on the server end!
# I.e. DEV_ADDED is producing 3: device id, DEV_REMOVE is producing 4: device id
def readDevUpdates():
global end_flag
while not end_flag:
info = sys.stdin.readline().strip()
status = "NONE"
if ':' in info:
status = int(info[:info.index(':')])
if status == DEV_ADDED:
client_socket.sendall(bytes(info, 'utf-8'))
elif status == DEV_REMOVED:
client_socket.sendall(bytes(info, 'utf-8'))
elif status == ENTER_COMBAT:
client_socket.sendall(bytes(info, 'utf-8'))
elif status == EXIT_COMBAT:
client_socket.sendall(bytes(info, 'utf-8'))
else:
client_socket.sendall(bytes(info, 'utf-8'))
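# Illustrative comment (an assumption, not from the original code): a stdin
# line such as "3:7" reports DEV_ADDED for device id 7 and is forwarded to
# the server verbatim as UTF-8 bytes; lines without a ':' fall through to
# the final else branch and are sent unchanged.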
### Test notes
# On start, update thread launches successfully!
# Server registers components as 0:0:0:Sword to allow for proper updating!
# Reads are only happening when data is available to save resources!
# On receiving the close request from the server, this client is properly cleaned up!
# Data received is being registered in BluetoothControl from this file's pipe!
def beginConnLoop():
global end_flag
end_flag = False
bot_updated_thread = threading.Thread(target=readDevUpdates)
bot_updated_thread.setDaemon(True)
bot_updated_thread.start()
components = "0:0:0:Sword" # No armor and sword weapon
client_socket.sendall(bytes(components, 'utf-8'))
while not end_flag:
read_list, write_list, err = select.select([client_socket], [], [])
for sock in read_list:
msg = sock.recv(message_size).decode('utf-8')
end_flag = msg == close_message
if end_flag:
sys.stdout.write("SOCKCLOSED")
client_socket.close()
break
else:
sys.stdout.write(msg + "\n")
sys.stdout.flush()
### This works!
if __name__ == "__main__":
setupClient()
|
[
"kennethdhunter@gmail.com"
] |
kennethdhunter@gmail.com
|
ccc5024e30b508be56f625b5cc2eb06203b0013e
|
f39d88e77080198681994a65fb0137523e23895c
|
/module_00.py
|
847c34d9966b7b3fbee3d61c7dd2e52d80ffa7ed
|
[] |
no_license
|
v-mail-81/Project_0
|
536901b1c8e517daa48982005f71a404f714aa7e
|
8fcb01c1c6cb15e6502b918de93a23247ac6627b
|
refs/heads/master
| 2022-11-11T19:01:12.419937
| 2020-07-05T14:21:34
| 2020-07-05T14:21:34
| 277,103,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[79]:
'''Load the numpy module to work with random numbers and arrays of them'''
import numpy as np
'''Define the function game_core_v2 with the argument number (the hidden number); the function will guess that number'''
def game_core_v2(number):
'''As a first attempt we predict the number 50, declaring the variable predict for this,
then, depending on whether it is greater or smaller than the hidden number, we change the predicted number by the step "step",
which is halved each time the number of attempts grows by 1. The function takes the hidden number and
returns the number of attempts'''
predict = 50
count = 1
step = 51
while number != predict:
count += 1
step = round(step / 2)
if predict < number:
predict += step
elif predict > number:
predict -= step
return(count)
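# Worked example (illustrative, an addition): for number = 37 the guesses are
# 50 (too high, step becomes 26), then 24 (too low, step becomes 13), then 37 -
# found in 3 attempts.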
'''Define the function score_game, which will generate a random number 1000 times.
The function takes the result of the game_core_v2 function and returns the average number of attempts over the given number of repetitions (1000)'''
def score_game(game_core_v2):
count_ls = []
np.random.seed(1) # fix the RANDOM SEED so that your experiment is reproducible!
random_array = np.random.randint(1,101, size=(1000))
for number in random_array:
count_ls.append(game_core_v2(number))
#print(game_core_v2(number)) #this part of the code prints the number of attempts for each run of the program
#print(len(count_ls)) #this part of the code prints the number of the current repetition (via the list length)
score = int(np.mean(count_ls)) #this part of the code finds the average number of attempts over 1000 repetitions
print(f"Your algorithm guesses the number in {score} attempts on average")
return(score)
score_game(game_core_v2)
|
[
"noreply@github.com"
] |
noreply@github.com
|
7334b51d4d6dd97c7c79768d9c96ddcd4c916b09
|
4917860c0be0b281af29ee0f1f92dcddfffb3a6b
|
/faust_proj/wsgi.py
|
7dc2c9726a9acaef51aef894bb4268df2d272b75
|
[] |
no_license
|
Dineshs91/faust-example
|
fdb8238e8464eb971b8d65025efe83dd25151c37
|
99e19260a0ab591d91bb40ebea1b1bfc901ca1fd
|
refs/heads/master
| 2020-04-10T16:08:24.219541
| 2018-12-14T08:58:21
| 2018-12-14T08:58:21
| 161,134,463
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for faust_proj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'faust_proj.settings')
application = get_wsgi_application()
|
[
"dineshpy07@gmail.com"
] |
dineshpy07@gmail.com
|
bb8c7aede0462de9cd8180f39a0e1b02e5216735
|
d3c4848338fe8a36a307c955e8a96f32fc880019
|
/tests/test_selenium_common.py
|
f430bf2ca7c458bc29d650063b205594ee3e569e
|
[
"MIT"
] |
permissive
|
panarahc/product-database
|
d111555f5f801c18a7a46c7fd3a2173149d8acd3
|
af48bc3e580e3bd7b499990bb7c51aabed242f71
|
refs/heads/master
| 2023-04-19T16:09:08.115666
| 2021-01-17T22:23:45
| 2021-01-17T22:23:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,371
|
py
|
"""
Test suite for the selenium test cases
"""
import os
import pytest
import time
import re
from django.urls import reverse
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tests import BaseSeleniumTest
@pytest.mark.online
@pytest.mark.selenium
class TestCommonFunctions(BaseSeleniumTest):
def test_login_only_mode(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# open the homepage
browser.get(liveserver + reverse("productdb:home"))
expected_homepage_text = "This database contains information about network equipment like routers and " \
"switches from multiple vendors."
assert expected_homepage_text in browser.find_element_by_tag_name("body").text
# Login as superuser - verify, that the "continue without login" button is visible
browser.find_element_by_id("navbar_login").click()
time.sleep(3)
expected_login_continue_text = "continue without login"
assert expected_login_continue_text in browser.find_element_by_tag_name("body").text
# login as superuser
browser.find_element_by_id("username").send_keys(self.ADMIN_USERNAME)
browser.find_element_by_id("password").send_keys(self.ADMIN_PASSWORD)
browser.find_element_by_id("login_button").click()
time.sleep(3)
# change settings to login only mode and save settings
browser.find_element_by_id("navbar_admin").click()
browser.find_element_by_id("navbar_admin_settings").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings")
browser.find_element_by_id("id_login_only_mode").click()
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings saved successfully")
# go to the Product Database Homepage - it must be visible
browser.get(liveserver + reverse("productdb:home"))
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_homepage_text)
# create the product list for the test case
test_pl_name = "LoginOnly Product List"
test_pl_description = "A sample description for the Product List."
test_pl_product_list_ids = "C2960X-STACK;CAB-ACE\nWS-C2960-24TT-L;WS-C2960-24TC-S"
test_pl_product_list_id = "C2960X-STACK"
browser.find_element_by_id("product_list_link").click()
WebDriverWait(browser, 10).until(EC.presence_of_element_located((
By.XPATH,
"id('product_list_table_wrapper')")
))
browser.find_element_by_xpath("//button[span='Add New']").click()
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, "id_name")))
browser.find_element_by_id("id_name").send_keys(test_pl_name)
browser.find_element_by_id("id_description").send_keys(test_pl_description)
browser.find_element_by_id("id_string_product_list").send_keys(test_pl_product_list_ids)
browser.find_element_by_id("id_vendor").send_keys("C")
browser.find_element_by_id("submit").click()
WebDriverWait(browser, 10).until(EC.presence_of_element_located((
By.XPATH,
"id('product_list_table_wrapper')")
))
# logout - the login screen is visible
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_logout").click()
expected_login_text = "Please enter your credentials below."
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_login_text)
# go manually to the Product Database Homepage - you must be redirected to the login screen
browser.get(liveserver + reverse("productdb:home"))
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_login_text)
# verify, that the "continue without login" button is not visible
assert expected_login_continue_text not in browser.find_element_by_tag_name("body").text
# the product list must be reachable, even when in login only mode
pl = self.api_helper.get_product_list_by_name(liveserver, test_pl_name)
browser.get(liveserver + reverse("productdb:share-product_list", kwargs={"product_list_id": pl["id"]}))
# verify some basic attributes of the page
body = browser.find_element_by_tag_name("body").text
assert test_pl_name in body
assert test_pl_description in body
assert test_pl_product_list_id in body
assert "maintained by %s" % self.ADMIN_DISPLAY_NAME in body
assert "%s</a>" % test_pl_product_list_id not in body, \
"Link to Product Details should not be available"
# login as API user
browser.get(liveserver + reverse("productdb:home"))
browser.find_element_by_id("username").send_keys(self.API_USERNAME)
browser.find_element_by_id("password").send_keys(self.API_PASSWORD)
browser.find_element_by_id("login_button").click()
time.sleep(3)
# the Product Database Homepage must be visible
assert expected_homepage_text in browser.find_element_by_tag_name("body").text
# disable the login only mode
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_logout").click()
browser.find_element_by_id("username").send_keys(self.ADMIN_USERNAME)
browser.find_element_by_id("password").send_keys(self.ADMIN_PASSWORD)
browser.find_element_by_id("login_button").click()
time.sleep(3)
browser.find_element_by_id("navbar_admin").click()
browser.find_element_by_id("navbar_admin_settings").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings")
assert "Settings" in browser.find_element_by_tag_name("body").text
browser.find_element_by_id("id_login_only_mode").click()
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings saved successfully")
# delete the new product list
browser.get(liveserver + reverse("productdb:list-product_lists"))
browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
time.sleep(1)
browser.find_element_by_xpath("//button[span='Delete Selected']").click()
time.sleep(3)
body = browser.find_element_by_tag_name("body").text
assert "Delete Product List" in body
browser.find_element_by_name("really_delete").click()
browser.find_element_by_id("submit").click()
time.sleep(3)
# verify that the product list is deleted
body = browser.find_element_by_tag_name("body").text
assert test_pl_description not in body
assert "Product List %s successfully deleted." % test_pl_name in body
# end session
self.logout_user(browser)
def test_change_password(self, browser, liveserver):
"""
test change password procedure with a different user (part of the selenium_tests fixture)
"""
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# login as the default API user
browser.get(liveserver + reverse("login"))
browser.find_element_by_id("username").send_keys("testpasswordchange")
browser.find_element_by_id("password").send_keys("api")
browser.find_element_by_id("login_button").click()
time.sleep(3)
# go to the change password dialog
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_change_password").click()
time.sleep(3)
assert "Old password" in browser.find_element_by_tag_name("body").text
# chang the password to api1234
browser.find_element_by_id("id_old_password").send_keys("api")
browser.find_element_by_id("id_new_password1").send_keys("api1234")
browser.find_element_by_id("id_new_password2").send_keys("api1234")
browser.find_element_by_id("submit").click()
time.sleep(3)
assert "Password change successful" in browser.find_element_by_tag_name("body").text
# logout
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_logout").click()
time.sleep(3)
expected_login_text = "Please enter your credentials below."
assert expected_login_text in browser.find_element_by_tag_name("body").text
# login with new password
browser.find_element_by_id("username").send_keys("testpasswordchange")
browser.find_element_by_id("password").send_keys("api1234")
browser.find_element_by_id("login_button").click()
time.sleep(3)
# the Product Database Homepage must be visible
expected_text = "This database contains information about network equipment like routers and " \
"switches from multiple vendors."
assert expected_text in browser.find_element_by_tag_name("body").text
# end session
self.logout_user(browser)
@pytest.mark.selenium
class TestUserProfile(BaseSeleniumTest):
def test_preferred_vendor_user_profile(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
browser.get(liveserver + reverse("productdb:home"))
# verify the vendor selection if the user is not logged in
browser.find_element_by_id("nav_browse").click()
browser.find_element_by_id("nav_browse_all_vendor_products").click()
assert "Browse Products by Vendor" in browser.find_element_by_class_name("page-header").text, \
"Should view the Browse Product by Vendor page"
# login
browser.find_element_by_id("navbar_login").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
homepage_message = "Browse Products by Vendor"
self.login_user(browser, self.API_USERNAME, self.API_PASSWORD, homepage_message)
# verify the selected default vendor
pref_vendor_select = browser.find_element_by_id("vendor_selection")
assert "Cisco Systems" in pref_vendor_select.text, "selected by default"
# view the edit settings page
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_user_profile").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Edit User Profile")
# verify that the vendor with the ID 1 is selected
pref_vendor_select = browser.find_element_by_id("id_preferred_vendor")
assert "Cisco Systems" in pref_vendor_select.text
pref_vendor_select = Select(pref_vendor_select)
# change the vendor selection
changed_vendor_name = "Juniper Networks"
pref_vendor_select.select_by_visible_text(changed_vendor_name)
browser.find_element_by_id("submit").send_keys(Keys.ENTER)
# redirect to the Browse Products by Vendor
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Browse Products by Vendor")
# verify that the new default vendor is selected
pref_vendor_select = browser.find_element_by_id("vendor_selection")
assert changed_vendor_name in pref_vendor_select.text
# end session
self.logout_user(browser)
def test_email_change_in_user_profile(self, browser, liveserver):
"""
use separate user from the selenium_tests fixture
"""
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
browser.get(liveserver + reverse("productdb:home"))
# login
browser.find_element_by_id("navbar_login").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
homepage_message = "This database contains information about network equipment like routers and switches " \
"from multiple vendors."
self.login_user(browser, "testuserprofilemail", self.API_PASSWORD, homepage_message)
# view the edit settings page
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_user_profile").click()
assert "api@localhost.localhost" in browser.find_element_by_id("id_email").get_attribute('value')
# change email
new_email = "a@b.com"
browser.find_element_by_id("id_email").clear()
browser.find_element_by_id("id_email").send_keys(new_email)
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, homepage_message)
# verify redirect to homepage
assert "User Profile successful updated" in browser.find_element_by_tag_name("body").text, \
"Should view a message that the user profile was saved"
# verify new value in email address
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_user_profile").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Edit User Profile")
assert new_email in browser.find_element_by_id("id_email").get_attribute('value'), \
"Show view the correct email address of the user (%s)" % new_email
# end session
self.logout_user(browser)
def test_search_option_in_user_profile(self, browser, liveserver):
"""
use separate user from the selenium_tests fixture
"""
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
search_term = "WS-C2960X-24T(D|S)"
browser.get(liveserver + reverse("productdb:home"))
# login
homepage_message = "This database contains information about network equipment like routers and switches " \
"from multiple vendors."
browser.find_element_by_id("navbar_login").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
self.login_user(browser, "testregexsession", self.API_PASSWORD, homepage_message)
# go to the all products view
expected_content = "On this page, you can view all products that are stored in the database."
browser.find_element_by_id("nav_browse").click()
browser.find_element_by_id("nav_browse_all_products").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_content)
# try to search for the product
browser.find_element_by_id("column_search_Product ID").send_keys(search_term)
self.wait_for_text_to_be_displayed_in_body_tag(browser, "No matching records found")
# enable the regular expression search feature in the user profile
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_user_profile").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Contact eMail:")
expected_content = "On this page, you can view all products that are stored in the database."
browser.find_element_by_id("id_regex_search").click()
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_content)
browser.find_element_by_id("column_search_Product ID").send_keys(search_term)
time.sleep(3)
assert "WS-C2960X-24TS" in browser.find_element_by_tag_name("body").text, \
"Should show no results (regular expression is used but by default not enabled)"
assert "WS-C2960X-24TD" in browser.find_element_by_tag_name("body").text, \
"Should show no results (regular expression is used but by default not enabled)"
# end session
self.logout_user(browser)
@pytest.mark.selenium
class TestProductLists(BaseSeleniumTest):
def test_product_list(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
add_button_xpath = "//button[span='Add New']"
edit_button_xpath = "//button[span='Edit Selected']"
delete_button_xpath = "//button[span='Delete Selected']"
test_pl_name = "Test Product List"
test_pl_description = "A sample description for the Product List."
test_pl_product_list_ids = "C2960X-STACK;CAB-ACE\nWS-C2960-24TT-L;WS-C2960-24TC-S"
test_pl_product_list_id = "C2960X-STACK"
# open the homepage
browser.get(liveserver + reverse("productdb:home"))
# go to product list view
browser.find_element_by_id("nav_browse").click()
browser.find_element_by_id("nav_browse_all_product_lists").click()
time.sleep(3)
# verify that the add, edit and delete buttons are not visible
body = browser.find_element_by_tag_name("body").text
assert "Add New" not in body
assert "Edit Selected" not in body
assert "Delete Selected" not in body
# login to the page as admin user
browser.find_element_by_id("navbar_login").click()
time.sleep(3)
self.login_user(browser, self.ADMIN_USERNAME, self.ADMIN_PASSWORD, "All Product Lists")
# verify that the add, edit and delete buttons are visible
body = browser.find_element_by_tag_name("body").text
assert "Add New" in body
assert "Edit Selected" in body
assert "Delete Selected" in body
# create a new product list
browser.find_element_by_xpath(add_button_xpath).click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Add Product List")
browser.find_element_by_id("id_name").send_keys(test_pl_name)
browser.find_element_by_id("id_description").send_keys(test_pl_description)
browser.find_element_by_id("id_string_product_list").send_keys(test_pl_product_list_ids)
browser.find_element_by_id("id_vendor").send_keys("C")
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Lists")
assert test_pl_name in browser.find_element_by_tag_name("body").text
# view the newly created product list
browser.find_element_by_link_text(test_pl_name).click()
time.sleep(3)
body = browser.find_element_by_tag_name("body").text
assert test_pl_name in body
assert test_pl_description in body
assert test_pl_product_list_id in body
assert "maintained by %s" % self.ADMIN_DISPLAY_NAME in body
assert browser.find_element_by_link_text(test_pl_product_list_id) is not None, \
"Link to Product Details should be available"
# go back to the product list overview
browser.find_element_by_id("_back").click()
# edit the new product list
browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
time.sleep(3)
browser.find_element_by_xpath(edit_button_xpath).click()
time.sleep(3)
browser.find_element_by_id("id_description").send_keys(" EDITED")
test_pl_description += " EDITED"
browser.find_element_by_id("submit").click()
time.sleep(3)
body = browser.find_element_by_tag_name("body").text
assert test_pl_description in body
# delete the new product list
browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
time.sleep(1)
browser.find_element_by_xpath(delete_button_xpath).click()
time.sleep(3)
body = browser.find_element_by_tag_name("body").text
assert "Delete Product List" in body
browser.find_element_by_name("really_delete").click()
browser.find_element_by_id("submit").click()
time.sleep(3)
# verify that the product list is deleted
body = browser.find_element_by_tag_name("body").text
assert test_pl_description not in body
assert "Product List %s successfully deleted." % test_pl_name in body
@pytest.mark.selenium
class TestProductDatabaseViews(BaseSeleniumTest):
def test_search_on_homepage(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# navigate to the homepage
browser.get(liveserver + reverse("productdb:home"))
browser.find_element_by_id("search_text_field").send_keys("WS-C2960X-24")
browser.find_element_by_id("submit_search").click()
# verify page by page title
assert "All Products" in browser.find_element_by_tag_name("body").text
time.sleep(2)
# test table content
expected_table_content = """Vendor Product ID Description List Price Lifecycle State"""
contain_table_rows = [
"Cisco Systems WS-C2960X-24PD-L Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, LAN Base 4595.00 USD",
"Cisco Systems WS-C2960X-24PS-L Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, LAN Base 3195.00 USD",
]
not_contain_table_rows = [
"Juniper Networks"
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in contain_table_rows:
assert r in table.text
for r in not_contain_table_rows:
assert r not in table.text
def test_product_group_view(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# navigate to the homepage
browser.get(liveserver + reverse("productdb:home"))
# go to the "All Product Groups" view
browser.find_element_by_id("nav_browse").click()
browser.find_element_by_id("nav_browse_all_product_groups").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Groups")
# test table content
expected_table_content = """Vendor\nName"""
table_rows = [
'Cisco Systems Catalyst 3850',
'Cisco Systems Catalyst 2960X',
'Cisco Systems Catalyst 2960',
'Juniper Networks EX2200',
]
table = browser.find_element_by_id('product_group_table')
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_table_content)
for r in table_rows:
assert r in table.text
# search product group by vendor column
table_rows = [
'Juniper Networks EX2200',
]
browser.find_element_by_id("column_search_Vendor").send_keys("Juni")
table = browser.find_element_by_id('product_group_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Vendor").clear()
# search product group by vendor column
table_rows = [
'Cisco Systems Catalyst 3850',
'Cisco Systems Catalyst 2960X',
'Cisco Systems Catalyst 2960',
]
browser.find_element_by_id("column_search_Name").send_keys("yst")
time.sleep(2)
table = browser.find_element_by_id('product_group_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Name").clear()
time.sleep(2)
# click on the "Catalyst 2960X" link
browser.find_element_by_partial_link_text("Catalyst 2960X").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Catalyst 2960X Product Group details")
# verify table content
expected_table_content = """Product ID\nDescription\nList Price Lifecycle State"""
table_rows = [
'C2960X-STACK',
'CAB-ACE',
'CAB-STK-E-0.5M',
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
# search product group by vendor column
table_rows = [
'WS-C2960X-24PD-L',
'WS-C2960X-24TD-L',
]
browser.find_element_by_id("column_search_Description").send_keys("2 x")
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Description").clear()
time.sleep(2)
# open detail page
browser.find_element_by_partial_link_text("C2960X-STACK").click()
detail_link = browser.current_url
self.wait_for_text_to_be_displayed_in_body_tag(browser, "C2960X-STACK Product details")
# verify that the "Internal Product ID" is not visible (because not set)
assert "Internal Product ID" not in browser.find_element_by_tag_name("body").text
# add an internal product ID and verify that it is visible
test_internal_product_id = "123456789-abcdef"
p = self.api_helper.update_product(liveserver_url=liveserver, product_id="C2960X-STACK",
internal_product_id=test_internal_product_id)
browser.get(liveserver + reverse("productdb:product-detail", kwargs={"product_id": p["id"]}))
page_text = browser.find_element_by_tag_name("body").text
assert "Internal Product ID" in page_text
assert test_internal_product_id in page_text
# end session
self.logout_user(browser)
def test_add_notification_message(self, browser, liveserver):
# go to the Product Database Homepage
browser.get(liveserver + reverse("productdb:home"))
browser.find_element_by_id("navbar_login").click()
time.sleep(3)
expected_homepage_text = "This database contains information about network equipment like routers and " \
"switches from multiple vendors."
self.login_user(
browser,
expected_content=expected_homepage_text,
username=self.ADMIN_USERNAME,
password=self.ADMIN_PASSWORD
)
# add a new notification message
browser.find_element_by_id("navbar_admin").click()
browser.find_element_by_id("navbar_admin_notification_message").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Add Notification Message")
# add content
title = "My message title"
summary_message = "summary message"
detailed_message = "detailed message"
browser.find_element_by_id("id_title").send_keys(title)
browser.find_element_by_id("id_summary_message").send_keys(summary_message)
browser.find_element_by_id("id_detailed_message").send_keys(detailed_message)
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, title)
assert summary_message in browser.find_element_by_tag_name("body").text
# end session
self.logout_user(browser)
def test_browse_products_view(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
expected_cisco_row = "C2960X-STACK Catalyst 2960-X FlexStack Plus Stacking Module 1195.00 USD"
expected_juniper_row = "EX-SFP-1GE-LX SFP 1000Base-LX Gigabit Ethernet Optics, 1310nm for " \
"10km transmission on SMF 1000.00 USD"
default_vendor = "Cisco Systems"
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:browse_vendor_products"))
time.sleep(5)
# check that the user sees a table
page_text = browser.find_element_by_tag_name('body').text
assert "Showing 1 to" in page_text
# the user sees a selection field, where the value "Cisco Systems" is selected
pl_selection = browser.find_element_by_id("vendor_selection")
assert default_vendor in pl_selection.text
# the table has four buttons: Copy, CSV, Excel and PDF
dt_buttons = browser.find_element_by_class_name("dt-buttons")
assert "PDF" == dt_buttons.find_element_by_xpath("//button[span='PDF']").text
assert "Copy" == dt_buttons.find_element_by_xpath("//button[span='Copy']").text
assert "CSV" == dt_buttons.find_element_by_xpath("//button[span='CSV']").text
assert "Excel" == dt_buttons.find_element_by_xpath("//button[span='Excel']").text
# the table shows 10 entries from the list (below the table, there is a string "Showing 1 to 10 of \d+ entries")
dt_wrapper = browser.find_element_by_id("product_table_info")
assert re.match(r"Showing 1 to \d+ of \d+ entries", dt_wrapper.text) is not None
# the page reloads and the table contains now the element "C2960X-STACK" as the first element of the table
table = browser.find_element_by_id('product_table')
rows = table.find_elements_by_tag_name('tr')
assert expected_cisco_row in [row.text for row in rows]
# navigate to a detail view
link = browser.find_element_by_link_text("PWR-C1-350WAC")
browser.execute_script("return arguments[0].scrollIntoView();", link)
time.sleep(1)
test_product_id = "WS-C2960-24LT-L"
browser.find_element_by_link_text(test_product_id).click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "%s Product details" % test_product_id)
# reopen the browse vendor products table
browser.get(liveserver + reverse("productdb:browse_vendor_products"))
time.sleep(5)
# the user sees a selection field, where the value "Cisco Systems" is selected
pl_selection = browser.find_element_by_id("vendor_selection")
assert default_vendor in pl_selection.text
pl_selection = Select(pl_selection)
# the user chooses the list named "Juniper Networks" and press the button "view product list"
pl_selection.select_by_visible_text("Juniper Networks")
browser.find_element_by_id("submit").send_keys(Keys.ENTER)
self.wait_for_text_to_be_displayed_in_body_tag(browser, "EX-SFP-1GE-LX")
# the page reloads and the table contains now the element "EX-SFP-1GE-LX" as the first element of the table
table = browser.find_element_by_id('product_table')
rows = table.find_elements_by_tag_name('tr')
match = False
for i in range(0, 3):
rows = table.find_elements_by_tag_name('tr')
match = expected_juniper_row in [row.text for row in rows]
if match:
break
time.sleep(3)
if not match:
pytest.fail("Element not found")
def test_browse_products_view_csv_export(self, browser, liveserver, test_download_dir):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:browse_vendor_products"))
# the user sees a selection field, where the value "Cisco Systems" is selected
vendor_name = "Cisco Systems"
pl_selection = browser.find_element_by_id("vendor_selection")
assert vendor_name in pl_selection.text
# the user hits the button CSV
dt_buttons = browser.find_element_by_class_name("dt-buttons")
dt_buttons.find_element_by_xpath("//button[span='CSV']").click()
# the file should download automatically (firefox is configured this way)
time.sleep(2)
# verify that the file is a CSV formatted field (with ";" as delimiter)
file = os.path.join(test_download_dir, "export products - %s.csv" % vendor_name)
with open(file, "r+", encoding="utf-8") as f:
assert "Product ID;Description;List Price;Lifecycle State\n" == f.readline()
def test_search_function_on_browse_vendor_products_view(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:browse_vendor_products"))
time.sleep(5)
# he enters a search term in the search box
search_term = "WS-C2960X-24P"
search_xpath = '//div[@class="col-sm-4"]/div[@id="product_table_filter"]/label/input[@type="search"]'
search = browser.find_element_by_xpath(search_xpath)
search.send_keys(search_term)
time.sleep(3)
# show product groups
dt_buttons = browser.find_element_by_class_name("dt-buttons")
dt_buttons.find_element_by_xpath("//button[span='show additional columns ']").click()
browser.find_element_by_link_text("Internal Product ID").click()
browser.find_element_by_link_text("Product Group").click()
# the table performs the search function and a defined amount of rows is displayed
expected_table_content = "Product ID Product Group Description " \
"List Price Lifecycle State Internal Product ID"
table_rows = [
"WS-C2960X-24PD-L Catalyst 2960X Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, "
"LAN Base 4595.00 USD 2960x-24pd-l",
"WS-C2960X-24PS-L Catalyst 2960X Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, "
"LAN Base 3195.00 USD 2960x-24ps-l"
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_xpath(search_xpath).clear()
time.sleep(1)
# search product by column (contains)
browser.find_element_by_id("column_search_Product ID").send_keys("WS-C2960X-24P")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Product ID").clear()
# search product by column (contains)
browser.find_element_by_id("column_search_Product Group").send_keys("2960X")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Product Group").clear()
# search description by column
browser.find_element_by_id("column_search_Description").send_keys("10G SFP")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
assert table_rows[0] in table.text
browser.find_element_by_id("column_search_Description").clear()
# search description by column
browser.find_element_by_id("column_search_List Price").send_keys("3195")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
assert table_rows[1] in table.text
browser.find_element_by_id("column_search_List Price").clear()
def test_browse_all_products_view(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
expected_cisco_row = "Cisco Systems C2960X-STACK Catalyst 2960-X FlexStack Plus Stacking Module 1195.00 USD"
expected_juniper_row = "Juniper Networks EX-SFP-1GE-LX SFP 1000Base-LX Gigabit Ethernet Optics, 1310nm for " \
"10km transmission on SMF 1000.00 USD"
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:all_products"))
# check that the user sees a table
time.sleep(5)
page_text = browser.find_element_by_tag_name('body').text
assert "Showing 1 to" in page_text
# the table has four buttons: Copy, CSV, Excel and PDF
dt_buttons = browser.find_element_by_class_name("dt-buttons")
assert "PDF" == dt_buttons.find_element_by_xpath("//button[span='PDF']").text
assert "Copy" == dt_buttons.find_element_by_xpath("//button[span='Copy']").text
assert "CSV" == dt_buttons.find_element_by_xpath("//button[span='CSV']").text
assert "Excel" == dt_buttons.find_element_by_xpath("//button[span='Excel']").text
# the table shows 10 entries from the list (below the table, there is a string "Showing 1 to 10 of \d+ entries")
dt_wrapper = browser.find_element_by_id("product_table_info")
assert re.match(r"Showing 1 to \d+ of \d+ entries", dt_wrapper.text) is not None
# the page reloads and the table contains now the element "C2960X-STACK" as the first element of the table
table = browser.find_element_by_id('product_table')
rows = table.find_elements_by_tag_name('tr')
assert expected_cisco_row in [row.text for row in rows]
# the page reloads and the table contains now the element "EX-SFP-1GE-LX" as the first element of the table
table = browser.find_element_by_id('product_table')
rows = table.find_elements_by_tag_name('tr')
match = False
for i in range(0, 3):
rows = table.find_elements_by_tag_name('tr')
match = expected_juniper_row in [row.text for row in rows]
if match:
break
time.sleep(3)
if not match:
pytest.fail("Element not found")
# navigate to a detail view
test_product_id = "GLC-LH-SMD="
browser.find_element_by_link_text(test_product_id).click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "%s Product details" % test_product_id)
def test_browse_all_products_view_csv_export(self, browser, liveserver, test_download_dir):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:all_products"))
# the user hits the button CSV
dt_buttons = browser.find_element_by_class_name("dt-buttons")
dt_buttons.find_element_by_xpath("//button[span='CSV']").click()
# the file should download automatically (firefox is configured this way)
time.sleep(2)
# verify that the file is a CSV formatted field (with ";" as delimiter)
file = os.path.join(test_download_dir, "export products.csv")
with open(file, "r+", encoding="utf-8") as f:
assert "Vendor;Product ID;Description;List Price;Lifecycle State\n" == f.readline()
def test_search_function_on_all_products_view(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:all_products"))
# he enters a search term in the search box
search_term = "WS-C2960X-24P"
search_xpath = '//div[@class="col-sm-4"]/div[@id="product_table_filter"]/label/input[@type="search"]'
search = browser.find_element_by_xpath(search_xpath)
search.send_keys(search_term)
time.sleep(3)
# the table performs the search function and a defined amount of rows is displayed
expected_table_content = """Vendor Product ID Description List Price Lifecycle State"""
table_rows = [
'WS-C2960X-24PD-L Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, LAN Base 4595.00 USD',
'WS-C2960X-24PS-L Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, LAN Base 3195.00 USD',
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_xpath(search_xpath).clear()
time.sleep(1)
# search vendor by column
browser.find_element_by_id("column_search_Vendor").send_keys("Cisco")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Vendor").clear()
# search product by column
browser.find_element_by_id("column_search_Product ID").send_keys("WS-C2960X-24P")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Product ID").clear()
# search description by column
browser.find_element_by_id("column_search_Description").send_keys("10G SFP")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
assert table_rows[0] in table.text
browser.find_element_by_id("column_search_Description").clear()
# search description by column
browser.find_element_by_id("column_search_List Price").send_keys("3195")
time.sleep(2)
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
assert table_rows[1] in table.text
browser.find_element_by_id("column_search_List Price").clear()
|
[
"henry@codingnetworker.com"
] |
henry@codingnetworker.com
|
e5514210e89b80409333bef0bf14804be6c84f11
|
4546a96d55a2cc3736dbf668c45677b572735f08
|
/get_focused_output.sh
|
31c299ae5e1814901c72c08051bb5e66b3f4ee00
|
[] |
no_license
|
teunissenstefan/scripts
|
eea268faee617acd80f8700b47e47e50032a0497
|
efab9151f4e93e616ec9846e841629b329be9596
|
refs/heads/master
| 2022-09-11T17:45:30.285304
| 2022-08-09T07:17:44
| 2022-08-09T07:17:44
| 244,189,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
sh
|
#!/usr/bin/env python3
import subprocess
import json
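# Prints the 1-based index of the sway output that currently has focus, or 0
# when swaymsg fails or its output cannot be parsed.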
try:
outputs = subprocess.check_output(["swaymsg", "-t", "get_outputs"])
outputs = json.loads(outputs.decode('utf-8').replace("'", '"'))
for idx, output in enumerate(outputs):
if output["focused"]:
print(idx + 1)
except:
print(0)
|
[
"stefanteunissen1@gmail.com"
] |
stefanteunissen1@gmail.com
|
d081c90ee2be7a970eccc901bbe411b6143cc227
|
bb98adfcca0865092761eb3ce95da6d3e016bc42
|
/wisdomgate/settings.py
|
73b2b8a4fe8ca2927991df9382aeb125f1648ec8
|
[] |
no_license
|
iskenderserkan/wisdomgate
|
e2332afa3786e6440a046e65bdad4d5b48dd5cd9
|
258041aeefac7ae654726271fc1b1bc409fc7ddd
|
refs/heads/master
| 2021-01-25T14:56:45.081826
| 2018-03-04T00:24:59
| 2018-03-04T00:24:59
| 123,740,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
"""
Django settings for wisdomgate project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tickai_&nsyh#@!86bozw-_6of-fi(ri$_2v9!w!mj#uhnbvb-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gb_knowledge',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wisdomgate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wisdomgate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'tr-tr'
TIME_ZONE = 'Europe/Istanbul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
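# Deployment note (standard Django workflow, not specific to this project):
# collect all static assets into STATIC_ROOT with
#   python manage.py collectstatic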
|
[
"iskenderserkan@gmail.com"
] |
iskenderserkan@gmail.com
|
1459e00c12efcf943450d0d9fbb1d34e6ba7db4b
|
93d78f2dd852b90d295bd523fd0bc09a644ee0d2
|
/test/sql/test_operators.py
|
e8ad88511482f9009137ee1ea40257fb924e0846
|
[
"MIT"
] |
permissive
|
mrocklin/sqlalchemy
|
ff13d4d07ba46a049da9611d356d07498e95337d
|
156f473de00024688404d73aea305cd4fc452638
|
refs/heads/master
| 2020-12-03T09:30:34.956612
| 2014-12-01T18:31:48
| 2014-12-01T18:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60,743
|
py
|
from sqlalchemy.testing import fixtures, eq_, is_, is_not_
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.sql import column, desc, asc, literal, collate, null, true, false
from sqlalchemy.sql.expression import BinaryExpression, \
ClauseList, Grouping, \
UnaryExpression, select, union, func, tuple_
from sqlalchemy.sql import operators, table
import operator
from sqlalchemy import String, Integer, LargeBinary
from sqlalchemy import exc
from sqlalchemy.engine import default
from sqlalchemy.sql.elements import _literal_as_text
from sqlalchemy.schema import Column, Table, MetaData
from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, Boolean
from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \
sqlite, mssql
from sqlalchemy import util
import datetime
import collections
from sqlalchemy import text, literal_column
from sqlalchemy import and_, not_, between, or_
from sqlalchemy.sql import true, false, null
class LoopOperate(operators.ColumnOperators):
def operate(self, op, *other, **kwargs):
return op
class DefaultColumnComparatorTest(fixtures.TestBase):
def _do_scalar_test(self, operator, compare_to):
left = column('left')
assert left.comparator.operate(operator).compare(
compare_to(left)
)
self._loop_test(operator)
def _do_operate_test(self, operator, right=column('right')):
left = column('left')
assert left.comparator.operate(
operator,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
assert operator(
left,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
self._loop_test(operator, right)
def _loop_test(self, operator, *arg):
l = LoopOperate()
is_(
operator(l, *arg),
operator
)
def test_desc(self):
self._do_scalar_test(operators.desc_op, desc)
def test_asc(self):
self._do_scalar_test(operators.asc_op, asc)
def test_plus(self):
self._do_operate_test(operators.add)
def test_is_null(self):
self._do_operate_test(operators.is_, None)
def test_isnot_null(self):
self._do_operate_test(operators.isnot, None)
def test_is_null_const(self):
self._do_operate_test(operators.is_, null())
def test_is_true_const(self):
self._do_operate_test(operators.is_, true())
def test_is_false_const(self):
self._do_operate_test(operators.is_, false())
def test_equals_true(self):
self._do_operate_test(operators.eq, True)
def test_notequals_true(self):
self._do_operate_test(operators.ne, True)
def test_is_true(self):
self._do_operate_test(operators.is_, True)
def test_isnot_true(self):
self._do_operate_test(operators.isnot, True)
def test_is_false(self):
self._do_operate_test(operators.is_, False)
def test_isnot_false(self):
self._do_operate_test(operators.isnot, False)
def test_like(self):
self._do_operate_test(operators.like_op)
def test_notlike(self):
self._do_operate_test(operators.notlike_op)
def test_ilike(self):
self._do_operate_test(operators.ilike_op)
def test_notilike(self):
self._do_operate_test(operators.notilike_op)
def test_is(self):
self._do_operate_test(operators.is_)
def test_isnot(self):
self._do_operate_test(operators.isnot)
def test_no_getitem(self):
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
self._do_operate_test, operators.getitem
)
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
lambda: column('left')[3]
)
def test_in(self):
left = column('left')
assert left.comparator.operate(operators.in_op, [1, 2, 3]).compare(
BinaryExpression(
left,
Grouping(ClauseList(
literal(1), literal(2), literal(3)
)),
operators.in_op
)
)
self._loop_test(operators.in_op, [1, 2, 3])
def test_notin(self):
left = column('left')
assert left.comparator.operate(operators.notin_op, [1, 2, 3]).compare(
BinaryExpression(
left,
Grouping(ClauseList(
literal(1), literal(2), literal(3)
)),
operators.notin_op
)
)
self._loop_test(operators.notin_op, [1, 2, 3])
def test_in_no_accept_list_of_non_column_element(self):
left = column('left')
foo = ClauseList()
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, [foo]
)
def test_in_no_accept_non_list_non_selectable(self):
left = column('left')
right = column('right')
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, right
)
def test_in_no_accept_non_list_thing_with_getitem(self):
# test [ticket:2726]
class HasGetitem(String):
class comparator_factory(String.Comparator):
def __getitem__(self, value):
return value
left = column('left')
right = column('right', HasGetitem)
assert_raises_message(
exc.InvalidRequestError,
r"in_\(\) accepts either a list of expressions or a selectable:",
left.in_, right
)
def test_collate(self):
left = column('left')
right = "some collation"
left.comparator.operate(operators.collate, right).compare(
collate(left, right)
)
def test_concat(self):
self._do_operate_test(operators.concat_op)
class CustomUnaryOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def _factorial_fixture(self):
class MyInteger(Integer):
class comparator_factory(Integer.Comparator):
def factorial(self):
return UnaryExpression(self.expr,
modifier=operators.custom_op("!"),
type_=MyInteger)
def factorial_prefix(self):
return UnaryExpression(self.expr,
operator=operators.custom_op("!!"),
type_=MyInteger)
def __invert__(self):
return UnaryExpression(self.expr,
operator=operators.custom_op("!!!"),
type_=MyInteger)
return MyInteger
def test_factorial(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial(),
"somecol !"
)
def test_double_factorial(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial().factorial(),
"somecol ! !"
)
def test_factorial_prefix(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
col.factorial_prefix(),
"!! somecol"
)
def test_factorial_invert(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
~col,
"!!! somecol"
)
def test_double_factorial_invert(self):
col = column('somecol', self._factorial_fixture())
self.assert_compile(
~(~col),
"!!! (!!! somecol)"
)
def test_unary_no_ops(self):
assert_raises_message(
exc.CompileError,
"Unary expression has no operator or modifier",
UnaryExpression(literal("x")).compile
)
def test_unary_both_ops(self):
assert_raises_message(
exc.CompileError,
"Unary expression does not support operator and "
"modifier simultaneously",
UnaryExpression(literal("x"),
operator=operators.custom_op("x"),
modifier=operators.custom_op("y")).compile
)
class _CustomComparatorTests(object):
def test_override_builtin(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(c1)
def test_column_proxy(self):
t = Table('t', MetaData(),
Column('foo', self._add_override_factory())
)
proxied = t.select().c.foo
self._assert_add_override(proxied)
self._assert_and_override(proxied)
def test_alias_proxy(self):
t = Table('t', MetaData(),
Column('foo', self._add_override_factory())
)
proxied = t.alias().c.foo
self._assert_add_override(proxied)
self._assert_and_override(proxied)
def test_binary_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(c1 - 6)
self._assert_and_override(c1 - 6)
def test_reverse_binary_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override(6 - c1)
self._assert_and_override(6 - c1)
def test_binary_multi_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_add_override((c1 - 6) + 5)
self._assert_and_override((c1 - 6) + 5)
def test_no_boolean_propagate(self):
c1 = Column('foo', self._add_override_factory())
self._assert_not_add_override(c1 == 56)
self._assert_not_and_override(c1 == 56)
def _assert_and_override(self, expr):
assert (expr & text("5")).compare(
expr.op("goofy_and")(text("5"))
)
def _assert_add_override(self, expr):
assert (expr + 5).compare(
expr.op("goofy")(5)
)
def _assert_not_add_override(self, expr):
assert not (expr + 5).compare(
expr.op("goofy")(5)
)
def _assert_not_and_override(self, expr):
assert not (expr & text("5")).compare(
expr.op("goofy_and")(text("5"))
)
class CustomComparatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
self.expr = expr
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger
class TypeDecoratorComparatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(TypeDecorator):
impl = Integer
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
self.expr = expr
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger
class TypeDecoratorWVariantComparatorTest(
_CustomComparatorTests,
fixtures.TestBase):
def _add_override_factory(self):
class SomeOtherInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
self.expr = expr
def __add__(self, other):
return self.expr.op("not goofy")(other)
def __and__(self, other):
return self.expr.op("not goofy_and")(other)
class MyInteger(TypeDecorator):
impl = Integer
class comparator_factory(TypeDecorator.Comparator):
def __init__(self, expr):
self.expr = expr
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
return MyInteger().with_variant(SomeOtherInteger, "mysql")
class CustomEmbeddedinTypeDecoratorTest(
_CustomComparatorTests,
fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
self.expr = expr
def __add__(self, other):
return self.expr.op("goofy")(other)
def __and__(self, other):
return self.expr.op("goofy_and")(other)
class MyDecInteger(TypeDecorator):
impl = MyInteger
return MyDecInteger
class NewOperatorTest(_CustomComparatorTests, fixtures.TestBase):
def _add_override_factory(self):
class MyInteger(Integer):
class comparator_factory(TypeEngine.Comparator):
def __init__(self, expr):
self.expr = expr
def foob(self, other):
return self.expr.op("foob")(other)
return MyInteger
def _assert_add_override(self, expr):
assert (expr.foob(5)).compare(
expr.op("foob")(5)
)
def _assert_not_add_override(self, expr):
assert not hasattr(expr, "foob")
def _assert_and_override(self, expr):
pass
def _assert_not_and_override(self, expr):
pass
class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_contains(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def contains(self, other, **kw):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()).contains(5),
"x -> :x_1"
)
def test_getitem(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
self.assert_compile(
Column('x', MyType())[5],
"x -> :x_1"
)
def test_op_not_an_iterator(self):
# see [ticket:2726]
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
col = Column('x', MyType())
assert not isinstance(col, collections.Iterable)
def test_lshift(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __lshift__(self, other):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()) << 5,
"x -> :x_1"
)
def test_rshift(self):
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __rshift__(self, other):
return self.op("->")(other)
self.assert_compile(
Column('x', MyType()) >> 5,
"x -> :x_1"
)
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test standalone booleans being wrapped in an AsBoolean, as well
as true/false compilation."""
def _dialect(self, native_boolean):
d = default.DefaultDialect()
d.supports_native_boolean = native_boolean
return d
def test_one(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(c),
"SELECT x WHERE x",
dialect=self._dialect(True)
)
def test_two_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False)
)
def test_two_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], whereclause=c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False)
)
def test_three_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False)
)
def test_three_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], whereclause=~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False)
)
def test_four(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).where(~c),
"SELECT x WHERE NOT x",
dialect=self._dialect(True)
)
def test_five_a(self):
c = column('x', Boolean)
self.assert_compile(
select([c]).having(c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False)
)
def test_five_b(self):
c = column('x', Boolean)
self.assert_compile(
select([c], having=c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False)
)
def test_six(self):
self.assert_compile(
or_(false(), true()),
"1 = 1",
dialect=self._dialect(False)
)
def test_eight(self):
self.assert_compile(
and_(false(), true()),
"false",
dialect=self._dialect(True)
)
def test_nine(self):
self.assert_compile(
and_(false(), true()),
"0 = 1",
dialect=self._dialect(False)
)
def test_ten(self):
c = column('x', Boolean)
self.assert_compile(
c == 1,
"x = :x_1",
dialect=self._dialect(False)
)
def test_eleven(self):
c = column('x', Boolean)
self.assert_compile(
c.is_(true()),
"x IS true",
dialect=self._dialect(True)
)
def test_twelve(self):
c = column('x', Boolean)
# I don't have a solution for this one yet,
# other than adding some heavy-handed conditionals
# into compiler
self.assert_compile(
c.is_(true()),
"x IS 1",
dialect=self._dialect(False)
)
class ConjunctionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test interaction of and_()/or_() with boolean , null constants
"""
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def test_one(self):
self.assert_compile(~and_(true()), "false")
def test_two(self):
self.assert_compile(or_(~and_(true())), "false")
def test_three(self):
self.assert_compile(or_(and_()), "")
def test_four(self):
x = column('x')
self.assert_compile(
and_(or_(x == 5), or_(x == 7)),
"x = :x_1 AND x = :x_2")
def test_five(self):
x = column("x")
self.assert_compile(
and_(true()._ifnone(None), x == 7),
"x = :x_1"
)
def test_six(self):
x = column("x")
self.assert_compile(or_(true(), x == 7), "true")
self.assert_compile(or_(x == 7, true()), "true")
self.assert_compile(~or_(x == 7, true()), "false")
def test_six_pt_five(self):
x = column("x")
self.assert_compile(select([x]).where(or_(x == 7, true())),
"SELECT x WHERE true")
self.assert_compile(
select(
[x]).where(
or_(
x == 7,
true())),
"SELECT x WHERE 1 = 1",
dialect=default.DefaultDialect(
supports_native_boolean=False))
def test_seven(self):
x = column("x")
self.assert_compile(
and_(true(), x == 7, true(), x == 9),
"x = :x_1 AND x = :x_2")
def test_eight(self):
x = column("x")
self.assert_compile(
or_(false(), x == 7, false(), x == 9),
"x = :x_1 OR x = :x_2")
def test_nine(self):
x = column("x")
self.assert_compile(
and_(x == 7, x == 9, false(), x == 5),
"false"
)
self.assert_compile(
~and_(x == 7, x == 9, false(), x == 5),
"true"
)
def test_ten(self):
self.assert_compile(
and_(None, None),
"NULL AND NULL"
)
def test_eleven(self):
x = column("x")
self.assert_compile(
select([x]).where(None).where(None),
"SELECT x WHERE NULL AND NULL"
)
def test_twelve(self):
x = column("x")
self.assert_compile(
select([x]).where(and_(None, None)),
"SELECT x WHERE NULL AND NULL"
)
def test_thirteen(self):
x = column("x")
self.assert_compile(
select([x]).where(~and_(None, None)),
"SELECT x WHERE NOT (NULL AND NULL)"
)
def test_fourteen(self):
x = column("x")
self.assert_compile(
select([x]).where(~null()),
"SELECT x WHERE NOT NULL"
)
def test_constant_non_singleton(self):
is_not_(null(), null())
is_not_(false(), false())
is_not_(true(), true())
def test_constant_render_distinct(self):
self.assert_compile(
select([null(), null()]),
"SELECT NULL AS anon_1, NULL AS anon_2"
)
self.assert_compile(
select([true(), true()]),
"SELECT true AS anon_1, true AS anon_2"
)
self.assert_compile(
select([false(), false()]),
"SELECT false AS anon_1, false AS anon_2"
)
class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table('op', column('field'))
def test_operator_precedence_1(self):
self.assert_compile(
self.table2.select((self.table2.c.field == 5) == None),
"SELECT op.field FROM op WHERE (op.field = :field_1) IS NULL")
def test_operator_precedence_2(self):
self.assert_compile(
self.table2.select(
(self.table2.c.field + 5) == self.table2.c.field),
"SELECT op.field FROM op WHERE op.field + :field_1 = op.field")
def test_operator_precedence_3(self):
self.assert_compile(
self.table2.select((self.table2.c.field + 5) * 6),
"SELECT op.field FROM op WHERE (op.field + :field_1) * :param_1")
def test_operator_precedence_4(self):
self.assert_compile(
self.table2.select(
(self.table2.c.field * 5) + 6),
"SELECT op.field FROM op WHERE op.field * :field_1 + :param_1")
def test_operator_precedence_5(self):
self.assert_compile(self.table2.select(
5 + self.table2.c.field.in_([5, 6])),
"SELECT op.field FROM op WHERE :param_1 + "
"(op.field IN (:field_1, :field_2))")
def test_operator_precedence_6(self):
self.assert_compile(self.table2.select(
(5 + self.table2.c.field).in_([5, 6])),
"SELECT op.field FROM op WHERE :field_1 + op.field "
"IN (:param_1, :param_2)")
def test_operator_precedence_7(self):
self.assert_compile(self.table2.select(
not_(and_(self.table2.c.field == 5,
self.table2.c.field == 7))),
"SELECT op.field FROM op WHERE NOT "
"(op.field = :field_1 AND op.field = :field_2)")
def test_operator_precedence_8(self):
self.assert_compile(
self.table2.select(
not_(
self.table2.c.field == 5)),
"SELECT op.field FROM op WHERE op.field != :field_1")
def test_operator_precedence_9(self):
self.assert_compile(self.table2.select(
not_(self.table2.c.field.between(5, 6))),
"SELECT op.field FROM op WHERE "
"op.field NOT BETWEEN :field_1 AND :field_2")
def test_operator_precedence_10(self):
self.assert_compile(
self.table2.select(
not_(
self.table2.c.field) == 5),
"SELECT op.field FROM op WHERE (NOT op.field) = :param_1")
def test_operator_precedence_11(self):
self.assert_compile(self.table2.select(
(self.table2.c.field == self.table2.c.field).
between(False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
def test_operator_precedence_12(self):
self.assert_compile(self.table2.select(
between((self.table2.c.field == self.table2.c.field),
False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
def test_operator_precedence_13(self):
self.assert_compile(
self.table2.select(
self.table2.c.field.match(
self.table2.c.field).is_(None)),
"SELECT op.field FROM op WHERE (op.field MATCH op.field) IS NULL")
def test_operator_precedence_collate_1(self):
self.assert_compile(
self.table1.c.name == literal('foo').collate('utf-8'),
"mytable.name = (:param_1 COLLATE utf-8)"
)
def test_operator_precedence_collate_2(self):
self.assert_compile(
(self.table1.c.name == literal('foo')).collate('utf-8'),
"mytable.name = :param_1 COLLATE utf-8"
)
def test_operator_precedence_collate_3(self):
self.assert_compile(
self.table1.c.name.collate('utf-8') == 'foo',
"(mytable.name COLLATE utf-8) = :param_1"
)
def test_operator_precedence_collate_4(self):
self.assert_compile(
and_(
(self.table1.c.name == literal('foo')).collate('utf-8'),
(self.table2.c.field == literal('bar')).collate('utf-8'),
),
"mytable.name = :param_1 COLLATE utf-8 "
"AND op.field = :param_2 COLLATE utf-8"
)
def test_operator_precedence_collate_5(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').desc()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 DESC"
)
def test_operator_precedence_collate_6(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').desc().nullslast()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 DESC NULLS LAST"
)
def test_operator_precedence_collate_7(self):
self.assert_compile(
select([self.table1.c.name]).order_by(
self.table1.c.name.collate('utf-8').asc()),
"SELECT mytable.name FROM mytable "
"ORDER BY mytable.name COLLATE utf-8 ASC"
)
def test_commutative_operators(self):
self.assert_compile(
literal("a") + literal("b") * literal("c"),
":param_1 || :param_2 * :param_3"
)
def test_op_operators(self):
self.assert_compile(
self.table1.select(self.table1.c.myid.op('hoho')(12) == 14),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable WHERE (mytable.myid hoho :myid_1) = :param_1"
)
def test_op_operators_comma_precedence(self):
self.assert_compile(
func.foo(self.table1.c.myid.op('hoho')(12)),
"foo(mytable.myid hoho :myid_1)"
)
def test_op_operators_comparison_precedence(self):
self.assert_compile(
self.table1.c.myid.op('hoho')(12) == 5,
"(mytable.myid hoho :myid_1) = :param_1"
)
def test_op_operators_custom_precedence(self):
op1 = self.table1.c.myid.op('hoho', precedence=5)
op2 = op1(5).op('lala', precedence=4)(4)
op3 = op1(5).op('lala', precedence=6)(4)
self.assert_compile(op2, "mytable.myid hoho :myid_1 lala :param_1")
self.assert_compile(op3, "(mytable.myid hoho :myid_1) lala :param_1")
class OperatorAssociativityTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_associativity_1(self):
f = column('f')
self.assert_compile(f - f, "f - f")
def test_associativity_2(self):
f = column('f')
self.assert_compile(f - f - f, "(f - f) - f")
def test_associativity_3(self):
f = column('f')
self.assert_compile((f - f) - f, "(f - f) - f")
def test_associativity_4(self):
f = column('f')
self.assert_compile((f - f).label('foo') - f, "(f - f) - f")
def test_associativity_5(self):
f = column('f')
self.assert_compile(f - (f - f), "f - (f - f)")
def test_associativity_6(self):
f = column('f')
self.assert_compile(f - (f - f).label('foo'), "f - (f - f)")
def test_associativity_7(self):
f = column('f')
        # because - has lower precedence than /
self.assert_compile(f / (f - f), "f / (f - f)")
def test_associativity_8(self):
f = column('f')
self.assert_compile(f / (f - f).label('foo'), "f / (f - f)")
def test_associativity_9(self):
f = column('f')
self.assert_compile(f / f - f, "f / f - f")
def test_associativity_10(self):
f = column('f')
self.assert_compile((f / f) - f, "f / f - f")
def test_associativity_11(self):
f = column('f')
self.assert_compile((f / f).label('foo') - f, "f / f - f")
def test_associativity_12(self):
f = column('f')
        # because / has higher precedence than -
self.assert_compile(f - (f / f), "f - f / f")
def test_associativity_13(self):
f = column('f')
self.assert_compile(f - (f / f).label('foo'), "f - f / f")
def test_associativity_14(self):
f = column('f')
self.assert_compile(f - f / f, "f - f / f")
def test_associativity_15(self):
f = column('f')
self.assert_compile((f - f) / f, "(f - f) / f")
def test_associativity_16(self):
f = column('f')
self.assert_compile(((f - f) / f) - f, "(f - f) / f - f")
def test_associativity_17(self):
f = column('f')
# - lower precedence than /
self.assert_compile((f - f) / (f - f), "(f - f) / (f - f)")
def test_associativity_18(self):
f = column('f')
# / higher precedence than -
self.assert_compile((f / f) - (f / f), "f / f - f / f")
def test_associativity_19(self):
f = column('f')
self.assert_compile((f / f) - (f - f), "f / f - (f - f)")
def test_associativity_20(self):
f = column('f')
self.assert_compile((f / f) / (f - f), "(f / f) / (f - f)")
def test_associativity_21(self):
f = column('f')
self.assert_compile(f / (f / (f - f)), "f / (f / (f - f))")
class InTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String)
)
def test_in_1(self):
self.assert_compile(self.table1.c.myid.in_(['a']),
"mytable.myid IN (:myid_1)")
def test_in_2(self):
self.assert_compile(~self.table1.c.myid.in_(['a']),
"mytable.myid NOT IN (:myid_1)")
def test_in_3(self):
self.assert_compile(self.table1.c.myid.in_(['a', 'b']),
"mytable.myid IN (:myid_1, :myid_2)")
def test_in_4(self):
self.assert_compile(self.table1.c.myid.in_(iter(['a', 'b'])),
"mytable.myid IN (:myid_1, :myid_2)")
def test_in_5(self):
self.assert_compile(self.table1.c.myid.in_([literal('a')]),
"mytable.myid IN (:param_1)")
def test_in_6(self):
self.assert_compile(self.table1.c.myid.in_([literal('a'), 'b']),
"mytable.myid IN (:param_1, :myid_1)")
def test_in_7(self):
self.assert_compile(
self.table1.c.myid.in_([literal('a'), literal('b')]),
"mytable.myid IN (:param_1, :param_2)")
def test_in_8(self):
self.assert_compile(self.table1.c.myid.in_(['a', literal('b')]),
"mytable.myid IN (:myid_1, :param_1)")
def test_in_9(self):
self.assert_compile(self.table1.c.myid.in_([literal(1) + 'a']),
"mytable.myid IN (:param_1 + :param_2)")
def test_in_10(self):
self.assert_compile(self.table1.c.myid.in_([literal('a') + 'a', 'b']),
"mytable.myid IN (:param_1 || :param_2, :myid_1)")
def test_in_11(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal('a') +
literal('a'),
literal('b')]),
"mytable.myid IN (:param_1 || :param_2, :param_3)")
def test_in_12(self):
self.assert_compile(self.table1.c.myid.in_([1, literal(3) + 4]),
"mytable.myid IN (:myid_1, :param_1 + :param_2)")
def test_in_13(self):
self.assert_compile(self.table1.c.myid.in_([literal('a') < 'b']),
"mytable.myid IN (:param_1 < :param_2)")
def test_in_14(self):
self.assert_compile(self.table1.c.myid.in_([self.table1.c.myid]),
"mytable.myid IN (mytable.myid)")
def test_in_15(self):
self.assert_compile(self.table1.c.myid.in_(['a', self.table1.c.myid]),
"mytable.myid IN (:myid_1, mytable.myid)")
def test_in_16(self):
self.assert_compile(self.table1.c.myid.in_([literal('a'),
self.table1.c.myid]),
"mytable.myid IN (:param_1, mytable.myid)")
def test_in_17(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal('a'),
self.table1.c.myid +
'a']),
"mytable.myid IN (:param_1, mytable.myid + :myid_1)")
def test_in_18(self):
self.assert_compile(
self.table1.c.myid.in_(
[
literal(1),
'a' +
self.table1.c.myid]),
"mytable.myid IN (:param_1, :myid_1 + mytable.myid)")
def test_in_19(self):
self.assert_compile(self.table1.c.myid.in_([1, 2, 3]),
"mytable.myid IN (:myid_1, :myid_2, :myid_3)")
def test_in_20(self):
self.assert_compile(self.table1.c.myid.in_(
select([self.table2.c.otherid])),
"mytable.myid IN (SELECT myothertable.otherid FROM myothertable)")
def test_in_21(self):
self.assert_compile(~self.table1.c.myid.in_(
select([self.table2.c.otherid])),
"mytable.myid NOT IN (SELECT myothertable.otherid FROM myothertable)")
def test_in_22(self):
self.assert_compile(
self.table1.c.myid.in_(
text("SELECT myothertable.otherid FROM myothertable")
),
"mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable)"
)
@testing.emits_warning('.*empty sequence.*')
def test_in_23(self):
self.assert_compile(self.table1.c.myid.in_([]),
"mytable.myid != mytable.myid")
def test_in_24(self):
self.assert_compile(
select([self.table1.c.myid.in_(select([self.table2.c.otherid]))]),
"SELECT mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable) AS anon_1 FROM mytable"
)
def test_in_25(self):
self.assert_compile(
select([self.table1.c.myid.in_(
select([self.table2.c.otherid]).as_scalar())]),
"SELECT mytable.myid IN (SELECT myothertable.otherid "
"FROM myothertable) AS anon_1 FROM mytable"
)
def test_in_26(self):
self.assert_compile(self.table1.c.myid.in_(
union(
select([self.table1.c.myid], self.table1.c.myid == 5),
select([self.table1.c.myid], self.table1.c.myid == 12),
)
), "mytable.myid IN ("
"SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "
"UNION SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_2)")
def test_in_27(self):
# test that putting a select in an IN clause does not
# blow away its ORDER BY clause
self.assert_compile(
select([self.table1, self.table2],
self.table2.c.otherid.in_(
select([self.table2.c.otherid],
order_by=[self.table2.c.othername],
limit=10, correlate=False)
),
from_obj=[self.table1.join(self.table2,
self.table1.c.myid == self.table2.c.otherid)],
order_by=[self.table1.c.myid]
),
"SELECT mytable.myid, "
"myothertable.otherid, myothertable.othername FROM mytable "
"JOIN myothertable ON mytable.myid = myothertable.otherid "
"WHERE myothertable.otherid IN (SELECT myothertable.otherid "
"FROM myothertable ORDER BY myothertable.othername "
"LIMIT :param_1) ORDER BY mytable.myid",
{'param_1': 10}
)
def test_in_28(self):
self.assert_compile(
self.table1.c.myid.in_([None]),
"mytable.myid IN (NULL)"
)
@testing.emits_warning('.*empty sequence.*')
def test_in_29(self):
self.assert_compile(self.table1.c.myid.notin_([]),
"mytable.myid = mytable.myid")
@testing.emits_warning('.*empty sequence.*')
def test_in_30(self):
self.assert_compile(~self.table1.c.myid.in_([]),
"mytable.myid = mytable.myid")
class MathOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
def _test_math_op(self, py_op, sql_op):
for (lhs, rhs, res) in (
(5, self.table1.c.myid, ':myid_1 %s mytable.myid'),
(5, literal(5), ':param_1 %s :param_2'),
(self.table1.c.myid, 'b', 'mytable.myid %s :myid_1'),
(self.table1.c.myid, literal(2.7), 'mytable.myid %s :param_1'),
(self.table1.c.myid, self.table1.c.myid,
'mytable.myid %s mytable.myid'),
(literal(5), 8, ':param_1 %s :param_2'),
(literal(6), self.table1.c.myid, ':param_1 %s mytable.myid'),
(literal(7), literal(5.5), ':param_1 %s :param_2'),
):
self.assert_compile(py_op(lhs, rhs), res % sql_op)
def test_math_op_add(self):
self._test_math_op(operator.add, '+')
def test_math_op_mul(self):
self._test_math_op(operator.mul, '*')
def test_math_op_sub(self):
self._test_math_op(operator.sub, '-')
def test_math_op_div(self):
if util.py3k:
self._test_math_op(operator.truediv, '/')
else:
self._test_math_op(operator.div, '/')
class ComparisonOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
)
def test_pickle_operators_one(self):
clause = (self.table1.c.myid == 12) & \
self.table1.c.myid.between(15, 20) & \
self.table1.c.myid.like('hoho')
eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
def test_pickle_operators_two(self):
clause = tuple_(1, 2, 3)
eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
def _test_comparison_op(self, py_op, fwd_op, rev_op):
dt = datetime.datetime(2012, 5, 10, 15, 27, 18)
for (lhs, rhs, l_sql, r_sql) in (
('a', self.table1.c.myid, ':myid_1', 'mytable.myid'),
('a', literal('b'), ':param_2', ':param_1'), # note swap!
(self.table1.c.myid, 'b', 'mytable.myid', ':myid_1'),
(self.table1.c.myid, literal('b'), 'mytable.myid', ':param_1'),
(self.table1.c.myid, self.table1.c.myid,
'mytable.myid', 'mytable.myid'),
(literal('a'), 'b', ':param_1', ':param_2'),
(literal('a'), self.table1.c.myid, ':param_1', 'mytable.myid'),
(literal('a'), literal('b'), ':param_1', ':param_2'),
(dt, literal('b'), ':param_2', ':param_1'),
(literal('b'), dt, ':param_1', ':param_2'),
):
# the compiled clause should match either (e.g.):
# 'a' < 'b' -or- 'b' > 'a'.
compiled = str(py_op(lhs, rhs))
fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
self.assert_(compiled == fwd_sql or compiled == rev_sql,
"\n'" + compiled + "'\n does not match\n'" +
fwd_sql + "'\n or\n'" + rev_sql + "'")
def test_comparison_operators_lt(self):
        self._test_comparison_op(operator.lt, '<', '>')
def test_comparison_operators_gt(self):
self._test_comparison_op(operator.gt, '>', '<')
def test_comparison_operators_eq(self):
self._test_comparison_op(operator.eq, '=', '=')
def test_comparison_operators_ne(self):
self._test_comparison_op(operator.ne, '!=', '!=')
def test_comparison_operators_le(self):
self._test_comparison_op(operator.le, '<=', '>=')
def test_comparison_operators_ge(self):
self._test_comparison_op(operator.ge, '>=', '<=')
class NonZeroTest(fixtures.TestBase):
def _raises(self, expr):
assert_raises_message(
TypeError,
"Boolean value of this clause is not defined",
bool, expr
)
def _assert_true(self, expr):
is_(bool(expr), True)
def _assert_false(self, expr):
is_(bool(expr), False)
def test_column_identity_eq(self):
c1 = column('c1')
self._assert_true(c1 == c1)
def test_column_identity_gt(self):
c1 = column('c1')
self._raises(c1 > c1)
def test_column_compare_eq(self):
c1, c2 = column('c1'), column('c2')
self._assert_false(c1 == c2)
def test_column_compare_gt(self):
c1, c2 = column('c1'), column('c2')
self._raises(c1 > c2)
def test_binary_identity_eq(self):
c1 = column('c1')
expr = c1 > 5
self._assert_true(expr == expr)
def test_labeled_binary_identity_eq(self):
c1 = column('c1')
expr = (c1 > 5).label(None)
self._assert_true(expr == expr)
def test_annotated_binary_identity_eq(self):
c1 = column('c1')
expr1 = (c1 > 5)
expr2 = expr1._annotate({"foo": "bar"})
self._assert_true(expr1 == expr2)
def test_labeled_binary_compare_gt(self):
c1 = column('c1')
expr1 = (c1 > 5).label(None)
expr2 = (c1 > 5).label(None)
self._assert_false(expr1 == expr2)
class NegationTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_negate_operators_1(self):
for (py_op, op) in (
(operator.neg, '-'),
(operator.inv, 'NOT '),
):
for expr, expected in (
(self.table1.c.myid, "mytable.myid"),
(literal("foo"), ":param_1"),
):
self.assert_compile(py_op(expr), "%s%s" % (op, expected))
def test_negate_operators_2(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~(self.table1.c.name == 'john')),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 "
"AND mytable.name != :name_1"
)
def test_negate_operators_3(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~(self.table1.c.name.between('jack', 'john'))),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"mytable.name NOT BETWEEN :name_1 AND :name_2"
)
def test_negate_operators_4(self):
self.assert_compile(
self.table1.select((self.table1.c.myid != 12) &
~and_(self.table1.c.name == 'john',
self.table1.c.name == 'ed',
self.table1.c.name == 'fred')),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND "
"NOT (mytable.name = :name_1 AND mytable.name = :name_2 "
"AND mytable.name = :name_3)"
)
def test_negate_operators_5(self):
self.assert_compile(
self.table1.select(
(self.table1.c.myid != 12) & ~self.table1.c.name),
"SELECT mytable.myid, mytable.name FROM "
"mytable WHERE mytable.myid != :myid_1 AND NOT mytable.name")
class LikeTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_like_1(self):
self.assert_compile(
self.table1.c.myid.like('somstr'),
"mytable.myid LIKE :myid_1")
def test_like_2(self):
self.assert_compile(
~self.table1.c.myid.like('somstr'),
"mytable.myid NOT LIKE :myid_1")
def test_like_3(self):
self.assert_compile(
self.table1.c.myid.like('somstr', escape='\\'),
"mytable.myid LIKE :myid_1 ESCAPE '\\'")
def test_like_4(self):
self.assert_compile(
~self.table1.c.myid.like('somstr', escape='\\'),
"mytable.myid NOT LIKE :myid_1 ESCAPE '\\'")
def test_like_5(self):
self.assert_compile(
self.table1.c.myid.ilike('somstr', escape='\\'),
"lower(mytable.myid) LIKE lower(:myid_1) ESCAPE '\\'")
def test_like_6(self):
self.assert_compile(
~self.table1.c.myid.ilike('somstr', escape='\\'),
"lower(mytable.myid) NOT LIKE lower(:myid_1) ESCAPE '\\'")
def test_like_7(self):
self.assert_compile(
self.table1.c.myid.ilike('somstr', escape='\\'),
"mytable.myid ILIKE %(myid_1)s ESCAPE '\\\\'",
dialect=postgresql.dialect())
def test_like_8(self):
self.assert_compile(
~self.table1.c.myid.ilike('somstr', escape='\\'),
"mytable.myid NOT ILIKE %(myid_1)s ESCAPE '\\\\'",
dialect=postgresql.dialect())
def test_like_9(self):
self.assert_compile(
self.table1.c.name.ilike('%something%'),
"lower(mytable.name) LIKE lower(:name_1)")
def test_like_10(self):
self.assert_compile(
self.table1.c.name.ilike('%something%'),
"mytable.name ILIKE %(name_1)s",
dialect=postgresql.dialect())
def test_like_11(self):
self.assert_compile(
~self.table1.c.name.ilike('%something%'),
"lower(mytable.name) NOT LIKE lower(:name_1)")
def test_like_12(self):
self.assert_compile(
~self.table1.c.name.ilike('%something%'),
"mytable.name NOT ILIKE %(name_1)s",
dialect=postgresql.dialect())
class BetweenTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_between_1(self):
self.assert_compile(
self.table1.c.myid.between(1, 2),
"mytable.myid BETWEEN :myid_1 AND :myid_2")
def test_between_2(self):
self.assert_compile(
~self.table1.c.myid.between(1, 2),
"mytable.myid NOT BETWEEN :myid_1 AND :myid_2")
def test_between_3(self):
self.assert_compile(
self.table1.c.myid.between(1, 2, symmetric=True),
"mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_4(self):
self.assert_compile(
~self.table1.c.myid.between(1, 2, symmetric=True),
"mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_5(self):
self.assert_compile(
between(self.table1.c.myid, 1, 2, symmetric=True),
"mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
def test_between_6(self):
self.assert_compile(
~between(self.table1.c.myid, 1, 2, symmetric=True),
"mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
class MatchTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
table1 = table('mytable',
column('myid', Integer),
column('name', String),
)
def test_match_1(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"mytable.myid MATCH ?",
dialect=sqlite.dialect())
def test_match_2(self):
self.assert_compile(
self.table1.c.myid.match('somstr'),
"MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
dialect=mysql.dialect())
def test_match_3(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"CONTAINS (mytable.myid, :myid_1)",
dialect=mssql.dialect())
def test_match_4(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"mytable.myid @@ to_tsquery(%(myid_1)s)",
dialect=postgresql.dialect())
def test_match_5(self):
self.assert_compile(self.table1.c.myid.match('somstr'),
"CONTAINS (mytable.myid, :myid_1)",
dialect=oracle.dialect())
class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_contains(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE '%%' || :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_contains_escape(self):
self.assert_compile(
column('x').contains('y', escape='\\'),
"x LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_contains_literal(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_contains_text(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_not_contains(self):
self.assert_compile(
~column('x').contains('y'),
"x NOT LIKE '%%' || :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_not_contains_escape(self):
self.assert_compile(
~column('x').contains('y', escape='\\'),
"x NOT LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_contains_concat(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE concat(concat('%%', %s), '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_contains_concat(self):
self.assert_compile(
~column('x').contains('y'),
"x NOT LIKE concat(concat('%%', %s), '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_contains_literal_concat(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_contains_text_concat(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_startswith_escape(self):
self.assert_compile(
column('x').startswith('y', escape='\\'),
"x LIKE :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_startswith(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT LIKE :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_not_startswith_escape(self):
self.assert_compile(
~column('x').startswith('y', escape='\\'),
"x NOT LIKE :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_startswith_literal(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_text(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_concat(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE concat(%s, '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_startswith_concat(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT LIKE concat(%s, '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_startswith_firebird(self):
self.assert_compile(
column('x').startswith('y'),
"x STARTING WITH :x_1",
checkparams={'x_1': 'y'},
dialect=firebird.dialect()
)
def test_not_startswith_firebird(self):
self.assert_compile(
~column('x').startswith('y'),
"x NOT STARTING WITH :x_1",
checkparams={'x_1': 'y'},
dialect=firebird.dialect()
)
def test_startswith_literal_mysql(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith_text_mysql(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE '%%' || :x_1",
checkparams={'x_1': 'y'}
)
def test_endswith_escape(self):
self.assert_compile(
column('x').endswith('y', escape='\\'),
"x LIKE '%%' || :x_1 ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_endswith(self):
self.assert_compile(
~column('x').endswith('y'),
"x NOT LIKE '%%' || :x_1",
checkparams={'x_1': 'y'}
)
def test_not_endswith_escape(self):
self.assert_compile(
~column('x').endswith('y', escape='\\'),
"x NOT LIKE '%%' || :x_1 ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_endswith_literal(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_text(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_mysql(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE concat('%%', %s)",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_endswith_mysql(self):
self.assert_compile(
~column('x').endswith('y'),
"x NOT LIKE concat('%%', %s)",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_endswith_literal_mysql(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith_text_mysql(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
class CustomOpTest(fixtures.TestBase):
def test_is_comparison(self):
c = column('x')
c2 = column('y')
op1 = c.op('$', is_comparison=True)(c2).operator
op2 = c.op('$', is_comparison=False)(c2).operator
assert operators.is_comparison(op1)
assert not operators.is_comparison(op2)
class TupleTypingTest(fixtures.TestBase):
def _assert_types(self, expr):
eq_(expr.clauses[0].type._type_affinity, Integer)
eq_(expr.clauses[1].type._type_affinity, String)
eq_(expr.clauses[2].type._type_affinity, LargeBinary()._type_affinity)
def test_type_coersion_on_eq(self):
a, b, c = column(
'a', Integer), column(
'b', String), column(
'c', LargeBinary)
t1 = tuple_(a, b, c)
expr = t1 == (3, 'hi', 'there')
self._assert_types(expr.right)
def test_type_coersion_on_in(self):
a, b, c = column(
'a', Integer), column(
'b', String), column(
'c', LargeBinary)
t1 = tuple_(a, b, c)
expr = t1.in_([(3, 'hi', 'there'), (4, 'Q', 'P')])
eq_(len(expr.right.clauses), 2)
for elem in expr.right.clauses:
self._assert_types(elem)
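# Illustrative sketch (not part of the upstream test suite): the custom
# .op() machinery exercised above can also be used directly; on the default
# dialect this mirrors the "x -> :x_1" expectation in ExtensionOperatorTest.
def _demo_custom_op():
    expr = column('x').op('->')(5)
    return str(expr)  # compiles to "x -> :x_1"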
|
[
"mike_mp@zzzcomputing.com"
] |
mike_mp@zzzcomputing.com
|
ec356e53c4d259f06b48074389ec9b57fb66f575
|
199522cb43b4e2c7e3bf034a0e604794258562b1
|
/0x03-python-data_structures/7-add_tuple.py
|
96d715528f3d23cdf3d725a9838247a97a8e4635
|
[] |
no_license
|
jormao/holbertonschool-higher_level_programming
|
a0fd92f2332f678e6fe496057c04f2995d24a4ac
|
360b3a7294e9e0eadcadb57d4c48c22369c05111
|
refs/heads/master
| 2020-09-29T01:36:20.094209
| 2020-05-15T03:27:06
| 2020-05-15T03:27:06
| 226,915,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
#!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
if len(tuple_a) != 2:
if len(tuple_a) == 1:
tuple_a = (tuple_a[0], 0)
if len(tuple_a) == 0:
tuple_a = (0, 0)
if len(tuple_b) != 2:
if len(tuple_b) == 1:
tuple_b = (tuple_b[0], 0)
if len(tuple_b) == 0:
tuple_b = (0, 0)
tuple_c = ((tuple_a[0] + tuple_b[0]), (tuple_a[1] + tuple_b[1]))
return (tuple_c)
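# Example usage, consistent with the zero-padding behavior above:
#   add_tuple((1, 2), (3, 4))  -> (4, 6)
#   add_tuple((1,), ())        -> (1, 0)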
|
[
"jormao@gmail.com"
] |
jormao@gmail.com
|
b6aedc802e87484a48e035f95f533be5d35b6c1d
|
8d90e93d5c7c430bba840783efea760eb37d4f3c
|
/Sword Offer/31.py
|
bf77d964602d4c023a1a558211a271d34b9607f5
|
[] |
no_license
|
handsome-fish/Leetcode
|
a0639735d27979dc7c8c0a1e7fa381f17904b0ad
|
b3893a5cc6ff0f2311dcdef55766e3ba2a3ba812
|
refs/heads/master
| 2021-07-22T01:30:09.859679
| 2021-07-14T15:05:27
| 2021-07-14T15:05:27
| 178,343,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
"""
剑指 Offer 31. Validate stack push/pop sequences
Given two integer sequences, the first describing the order in which values
are pushed onto a stack, determine whether the second is a possible pop order
for that stack. Assume all pushed values are distinct. For example,
{1,2,3,4,5} is a push sequence of some stack and {4,5,3,2,1} is a valid pop
sequence for it, but {4,3,5,1,2} cannot be a pop sequence of that push order.
Example 1:
Input: pushed = [1,2,3,4,5], popped = [4,5,3,2,1]
Output: true
Explanation: we can perform the operations in this order:
push(1), push(2), push(3), push(4), pop() -> 4,
push(5), pop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1
Example 2:
Input: pushed = [1,2,3,4,5], popped = [4,3,5,1,2]
Output: false
Explanation: 1 cannot be popped before 2.
Constraints:
0 <= pushed.length == popped.length <= 1000
0 <= pushed[i], popped[i] < 1000
pushed is a permutation of popped.
"""
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
stack, popped = [], popped[::-1]
for i in pushed:
stack.append(i)
while stack and stack[-1] == popped[-1]:
stack.pop()
popped.pop()
return stack == []
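# Quick sanity check mirroring the docstring examples (hypothetical usage):
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])  # True
#   Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])  # False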
|
[
"gitfish@163.com"
] |
gitfish@163.com
|
821605a24dc98e2f6d96ac6410769972c52bcdd4
|
fa5fb155ba4bc5f4335859b8a93b73be8c1a2abb
|
/tt.py
|
7cb12887e5f30f3d9851b55e6de33ba0bb50c54c
|
[] |
no_license
|
wujifan/test_allure
|
c9703a58d344bbb8825186f8c360595a5486506a
|
bf5cbb71b7a0781cba8d3a0da766592d680e8dc3
|
refs/heads/master
| 2023-03-31T09:44:10.391654
| 2021-04-07T09:26:44
| 2021-04-07T09:26:44
| 355,466,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from base.init__driver import ini_data
a = ini_data('test_data', ['name', 'exp_value'])
|
[
"wujifan4811@163.com"
] |
wujifan4811@163.com
|
700e1bb227aa8d04f4608999e5cb91fc629ffc26
|
0bc1cf3ce50a035dc85b56e32bd930c91a8776c0
|
/blog/migrations/0001_initial.py
|
91ae6deba16376ca31124388427bb7421762a9a4
|
[] |
no_license
|
RiaLolwut/djangogirls
|
7c2fa64a2824b48fbbafc2f980d654f511562063
|
a81c14065bbf74ff04f5e8874b3acacc347651e9
|
refs/heads/master
| 2020-04-29T06:35:30.459881
| 2019-03-16T06:20:00
| 2019-03-16T06:20:00
| 175,921,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
# Generated by Django 2.0.13 on 2019-03-16 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
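# To apply this initial migration (standard Django command, assuming the app
# is registered under the label "blog"):
#   python manage.py migrate blog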
|
[
"riaparishseo@gmail.com"
] |
riaparishseo@gmail.com
|
31a6c319b7c69134890d2911c524ff347b6efc76
|
4109762775f6a465639550b6de36b50450a209ad
|
/strategy/blackjack/playercheatstrategy.py
|
52da55b106d966347e46767a699c600de46c6a4c
|
[] |
no_license
|
samuelcstewart/oosd
|
7f8fb79f6cb5ea7a211d5f0ea6278a1eb475a534
|
5f15cdd34ec3db0a4fcf01be5e2cb00b7f6e221f
|
refs/heads/master
| 2020-04-15T18:37:10.148250
| 2015-05-14T08:51:20
| 2015-05-14T08:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from strategy import Strategy
class PlayerCheatStrategy(Strategy):
    """ Basic cheat strategy: peek at the next card in the deck before deciding """
    def hit(self):
        # Reveal the card that would be dealt next (top of the deck).
        print("Next card: " + str(self.game_state.deck.cards[-1]))
        # raw_input (Python 2) returns the player's choice as a string.
        return raw_input("h to hit or s to stand: ") == 'h'
|
[
"stewasc3@student.op.ac.nz"
] |
stewasc3@student.op.ac.nz
|
4698d40ef6e587e24e8f464698a390215afa3948
|
115bf3b584b489f34167e5e9d98eb53a6c03044b
|
/libcrowbar/configs/cpplint.py
|
e60e94019c192b30e58b7b1c196a6d53fa8cb4a4
|
[] |
no_license
|
npcardoso/PhDThesis
|
fe094414d695cf64db100d23997c0e347e2dea14
|
16062572cfe6e234856325c2a8c9e4c88a335236
|
refs/heads/master
| 2021-01-16T21:29:21.888085
| 2018-10-26T18:22:32
| 2018-10-26T18:22:32
| 63,144,416
| 37
| 15
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 234,786
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
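For example, an illustrative (made-up) line suppressing one warning:
typedef unsigned long long uint64;  // NOLINT(runtime/int)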
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. A CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
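# Illustration of the tables built above (added for exposition, not part
# of upstream cpplint): _CHECK_REPLACEMENT['CHECK']['=='] is 'CHECK_EQ' and
# _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] is 'EXPECT_NE', so a check such
# as CHECK(a == b) can be suggested as the better CHECK_EQ(a, b).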
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
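# For example, the pattern matches the 'and' in 'if (a and b)' (so '&&' can
# be suggested), but not the 'and' inside an identifier such as 'operand',
# because of the required "[ =()]" prefix and "[ (]" (or end-of-line)
# lookahead.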
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
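# A minimal sketch (added for illustration; not part of upstream cpplint)
# showing that the cached wrappers mirror the re module's module-level API:
def _CachedRegexExamples():
  """Illustrative only: exercises Match, Search and ReplaceAll."""
  assert Match(r'\d+', '123abc')        # anchored at the start, like re.match
  assert not Match(r'\d+', 'abc123')    # fails unless the prefix matches
  assert Search(r'\d+', 'abc123')       # scans the whole string, like re.search
  assert ReplaceAll(r'\s+', ' ', 'a   b') == 'a b'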
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
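# For example (illustrative), calling CheckNextIncludeOrder(_C_SYS_HEADER)
# while self._section is already _CPP_SECTION returns the message
# 'Found C system header after C++ system header'.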
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
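# Worked example (illustrative): for a non-test function base_trigger is
# 250, so the trigger is 250 * 2**0 = 250 at --v=0 and 500 at --v=1
# (matching the _NORMAL_TRIGGER comment above); a 1001-line body then
# yields error_level = int(log(1001 / 250, 2)) = 2.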
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
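# A sample default (emacs-format) output line, with an illustrative
# message:
#   foo/bar.cc:42: Missing spaces around = [whitespace/operators] [4]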
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of removing spaces so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if that doesn't work, we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
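# Examples (illustrative): IsCppString('x = "abc') is True, since one
# unescaped double quote is still open; IsCppString('x = "abc"') is False.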
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
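# For example (illustrative), _CollapseStrings('printf("hello %s", name)')
# returns 'printf("", name)': the literal's contents are dropped so later
# checks cannot be fooled by operators or comment markers inside strings.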
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '>', the matching '<' must have been an operator,
# since template argument lists should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
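# For example (illustrative), if clean_lines.elided[linenum] is
# 'if (foo) {' and pos == 3 (the '('), CloseExpression returns the same
# line and linenum with pos == 8, one past the matching ')'.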
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '<', the matching '>' must have been an operator,
# since template argument lists should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
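# For example, with no --root flag, 'chrome/browser/ui/browser.h' yields
# 'CHROME_BROWSER_UI_BROWSER_H_': '-', '.', '/' and whitespace become '_',
# the result is uppercased, and a trailing '_' is appended.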
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
for i in lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without multi-threading in mind. Also,
engineers rely on their old experience; many learned POSIX before the
threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
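# Illustrative applications of the patterns above: 't = rand();' is flagged
# (the '= ' prefix shows the return value is used in an expression), while
# '->rand();' and 'ACMRandom rand(seed);' are not, as intended by the
# validation patterns' comment.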
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example, the following function:
void increment_counter(int* count) {
*count++;
}
is invalid because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self):
_BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines is enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
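# Hedged examples of namespace terminators (illustrative names):
#   }  // namespace mynamespace   <- accepted for "namespace mynamespace"
#   }  // namespace               <- accepted for an anonymous namespace
#   }  // end of mynamespace      <- flagged: expected "// namespace mynamespace"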
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
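  # A hedged walk-through of the SWIG example from the docstring above: at
  # "#ifdef SWIG" the stack is checkpointed; the struct parsed in the #if
  # branch is pushed normally; at "#else" that stack is saved to
  # stack_before_else and the pre-#if checkpoint is restored; at "#endif"
  # the saved stack wins, so only the #if branch's parse survives.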
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace; this
      # is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo())
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
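    # Illustrative example of the collapsing above: a hypothetical
    # 'Foo(std::map<int, string> m)' first splits into
    # ['std::map<int', ' string> m']; the unbalanced '<' in the first piece
    # pulls the next piece back in, yielding one argument again.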
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.')
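# Hedged examples for the runtime/explicit diagnostics above:
#   explicit Foo(int x);     // OK
#   Foo(int x);              // flagged: should be marked explicit
#   Foo(int x, int y = 0);   // flagged: callable with one argument
#   explicit Foo();          // flagged: zero-parameter, drop explicit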
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
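# Behavior sketch (follows directly from the implementation):
#   IsBlankLine('')          -> True
#   IsBlankLine('   \t  ')   -> True
#   IsBlankLine('  int x;')  -> False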
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
  For an overview of why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
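# Hedged examples of how _RE_PATTERN_TODO drives CheckComment below
# (usernames are hypothetical):
#   // TODO(alice): refactor this.  <- well-formed
#   // TODO: refactor this.         <- flagged: missing username
#   //   TODO(alice): refactor.     <- flagged: too many spaces before TODO
#   // TODO(alice):refactor.        <- flagged: needs a space after TODO(...)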
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the //.
if Match(r'//[^ ]*\w', comment):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80-column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
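  # For instance (illustrative): 'bool operator==(const A& a)' is rewritten
  # to 'bool operator__(const A& a)' before the checks below, so the '=='
  # in the operator name is not mistaken for a comparison missing spaces.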
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
  # If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
if (match and match.group(1) != '(' and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
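# Hedged examples for the brace/semicolon checks above:
#   if (cond){                 // flagged: missing space before {
#   }else {                    // flagged: missing space before else
#   ;                          // flagged: lone semicolon, use {} instead
#   std::vector<int> v{};      // not flagged: brace initialization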
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def IsTemplateParameterList(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is the end of template<>.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is end of a template parameter list, False otherwise.
"""
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, column)
if (startpos > -1 and
Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
return True
return False
def IsRValueType(clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
  # most likely an rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for simple type and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']:
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
    # Found closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
    # first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
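# Behavior sketch (illustrative class names):
#   'Foo(Foo&& other) = default;'         -> True
#   'Foo& operator=(Foo&&) = delete;'     -> True
#   'Foo(Foo&& other) { Swap(&other); }'  -> False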
def IsRValueAllowed(clean_lines, linenum):
"""Check if RValue reference is allowed on a particular line.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if line is within the region where RValue references are allowed.
"""
# Allow region marked by PUSH/POP macros
for i in xrange(linenum, 0, -1):
line = clean_lines.elided[i]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
if not line.endswith('PUSH'):
return False
for j in xrange(linenum, clean_lines.NumLines(), 1):
line = clean_lines.elided[j]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
return line.endswith('POP')
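  # The macro-delimited region scanned above looks like this (hypothetical
  # code):
  #   GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH
  #   void Accept(Value&& value);
  #   GOOGLE_ALLOW_RVALUE_REFERENCES_POP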
# Allow operator=
line = clean_lines.elided[linenum]
if Search(r'\boperator\s*=\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
# Allow constructors
match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
if match and match.group(1) == match.group(2):
return IsDeletedOrDefault(clean_lines, linenum)
if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
if Match(r'\s*[\w<>]+\s*\(', line):
previous_line = 'ReturnType'
if linenum > 0:
previous_line = clean_lines.elided[linenum - 1]
if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
return IsDeletedOrDefault(clean_lines, linenum)
return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
and_pos = len(match.group(1))
if IsRValueType(clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
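# Illustrative behavior (hypothetical code): a parameter list like
# 'void Foo(Bar&& bar);' is typically reported as an unapproved rvalue
# reference, while a badly spaced boolean expression such as 'a &&b' may
# instead draw the 'Missing spaces around &&' warning.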
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
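# For example (illustrative): with elided lines ['int a;', '', '  '] and
# linenum 2, GetPreviousNonBlankLine returns ('int a;', 0), skipping the
# empty and whitespace-only lines.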
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals and lambdas.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
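# Illustrative examples (hypothetical code):
#   while (Advance());   -> 'Empty loop bodies should use {} or continue'
#   if (done());         -> 'Empty conditional bodies should use {}'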
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
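# For example (illustrative, assuming 'CHECK' is among _CHECK_MACROS):
#   FindCheckMacro('  CHECK(x);') == ('CHECK', 7)
# where 7 is the index of the macro's opening parenthesis.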
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
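# Illustrative behavior (hypothetical code): 'CHECK(x == 42);' draws a
# suggestion to use CHECK_EQ, 'CHECK(x == 42 && y);' is left alone because
# of the logical operator, and 'CHECK(a == b);' is skipped because neither
# operand is a constant literal.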
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
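# Illustrative behavior (hypothetical code, assuming the usual
# _ALT_TOKEN_REPLACEMENT mapping such as 'and' -> '&&' and 'not' -> '!'):
# 'if (a and not b)' draws suggestions to use the operator spellings.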
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions Rob Pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations where we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
  return (filename.endswith('_test.cc') or
          filename.endswith('_unittest.cc') or
          filename.endswith('_regtest.cc'))
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
  # lives in the same directory (or in ../public), then it's likely to be
  # owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
  # We shouldn't include a file more than once. Actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
# Suggest a different header for ostream
if include == 'ostream':
error(filename, linenum, 'readability/streams', 3,
'For logging, include "base/logging.h" instead of <ostream>.')
else:
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation symbols, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
  assert start_position > 0, (
      'start_pattern must end with an opening punctuation symbol.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation symbol.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
  # All punctuation matched.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
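# Illustrative classification of hypothetical parameter text:
#   'const string& s,' -> matches _RE_PATTERN_REF_PARAM and
#                         _RE_PATTERN_CONST_REF_PARAM (allowed)
#   'string& s)'       -> matches _RE_PATTERN_REF_PARAM only (flagged)
#   'string* s)'       -> matches neither (pointers are not checked here)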
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
  # If DISALLOW_COPY_AND_ASSIGN or DISALLOW_IMPLICIT_CONSTRUCTORS is present,
# then it should be the last thing in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
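# Illustrative behavior (hypothetical code): 'snprintf(buf, 10, "%s", s);'
# draws a suggestion to use sizeof(buf) as the second argument, and any use
# of sprintf, strcpy, or strcat is flagged unconditionally.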
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
      # opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
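# For example (illustrative), in the hypothetical constructor
#   Foo::Foo(int n)
#       : count_(n), name_("foo") {
# the second line is recognized as part of the initializer list.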
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
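# Illustrative behavior (hypothetical code): 'void Update(Config& config);'
# at namespace scope is flagged, while 'void swap(Config& a, Config& b);'
# matches the whitelist above and is not.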
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)]+)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly require all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# Function((function_pointer_arg)(int), int param)
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
raw_line = clean_lines.raw_lines[linenum]
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
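# For illustration (editor's sketch, not from the original source): with the
# static_cast pattern, "int x = (int)f;" reaches the final
# 'readability/casting' error above, while "typedef void (*Fn)(int);"
# (function pointer) and "Function(int /*unused_param*/);" (commented
# parameter) are filtered out before any cast warning is issued.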
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
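# For illustration (editor's sketch, not from the original source): under the
# rules above, FilesBelongToSameModule('a/b/foo_test.cc', 'foo.h') returns
# (True, 'a/b/'), while FilesBelongToSameModule('a/b/foo.cc', 'bar.h') returns
# (False, '').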
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
"""Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# A lambda introducer specifies a default capture if it starts with "[="
# or if it starts with "[&" _not_ followed by an identifier.
match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
if match:
# Found a potential error, check what comes after the lambda-introducer.
# If it's not open parenthesis (for lambda-declarator) or open brace
# (for compound-statement), it's not a lambda.
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
error(filename, linenum, 'build/c++11',
4, # 4 = high confidence
'Default lambda captures are an unapproved C++ feature.')
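# For illustration (editor's sketch): the introducer regex above flags
# "[=] { ... }" and "[&] { ... }", but not an explicit capture such as
# "[&x] { ... }", because "&" followed by an identifier character is excluded.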
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*\bvirtual\b)', line)
if not virtual: return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(1))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Check that at most one of "override" or "final" is present, not both
line = clean_lines.elided[linenum]
if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Flag unapproved C++11 headers.
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
# utility
'forward',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for filter in reversed(cfg_filters):
_AddFilters(filter)
return True
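# A hedged illustration (editor's sketch of the format parsed above): a
# CPPLINT.cfg file might contain lines such as
#   set noparent
#   filter=-build/include_order,+readability/casting
#   exclude_files=.*_generated\.cc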
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
    # minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
sys.stderr.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
|
[
"alexandrecperez@gmail.com"
] |
alexandrecperez@gmail.com
|
766d894a674bb679015ec516a8fdc1796ea7ebc7
|
46ae325c342957bdeddf9e92b2fbb97f769237f6
|
/Galois Counter Mode/Correc_TP1_AES.py
|
7a0dc8a2fb088d25a98f3ae96e1a4599db014564
|
[] |
no_license
|
DenizSungurtekin/Cryptography-and-security
|
557b029bb0a8dabc2b42176916da317478085f6a
|
4050d5be02e7186ada87084008f60f515970c073
|
refs/heads/main
| 2023-03-17T14:56:18.055721
| 2021-03-05T12:10:28
| 2021-03-05T12:10:28
| 344,783,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,154
|
py
|
# Here are the S-boxes as tuples. You can change their form if you prefer to
# work with lists or other structures.
S_box = (
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
)
S_box_inv = (
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
)
MixColMatrix = [[0x02,0x03,0x01,0x01],
[0x01,0x02,0x03,0x01],
[0x01,0x01,0x02,0x03],
[0x03,0x01,0x01,0x02]]
MixColInverse = [[0x0e,0x0b,0x0d,0x09],
[0x09,0x0e,0x0b,0x0d],
[0x0d,0x09,0x0e,0x0b],
[0x0b,0x0d,0x09,0x0e]]
# Applies the S-Box on 32 bit blocks (lists of 4 bytes)
def Sbox(lst,box):
result = [box[lst[0]],box[lst[1]],box[lst[2]],box[lst[3]]]
return result
# xor on two lists of 4 bytes
def xor_lst(lst1,lst2):
result = []
for i in range(4):
result.append(lst1[i] ^ lst2[i])
return result
# Polynomial product in GF(2^8), with irreducible polynomial x^8+x^4+x^3+x+1
def poly_multiplication(p1,p2):
liste_p1 = list(str(bin(p1))[2:])
res = 0
degrep1 = len(liste_p1)-1
for i in range(degrep1+1):
if liste_p1[i] == '1':
temp = p2 * pow(2,degrep1-i)
res = res ^ temp
while res > 255:
bigger_byte = res // 256
lesser_byte = res % 256
poly = 0b11011
res = lesser_byte ^ poly_multiplication(bigger_byte,poly)
return res
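# Worked example (editor's note; the value is the classic one from FIPS-197):
# poly_multiplication(0x57, 0x83) == 0xC1, i.e.
# (x^6+x^4+x^2+x+1)*(x^7+x+1) mod (x^8+x^4+x^3+x+1) = x^7+x^6+1.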
# Key expansion step, returns a list of keys (each key will be composed of four
# lists of 4 bytes). The given key is either 4x4, 6x4 or 8x4 bytes.
def key_expansion(key,box):
N = len(key)
if len(key) == 4:
steps = 10
elif len(key) == 6:
steps = 12
elif len(key) == 8:
steps = 14
else:
raise ValueError("Invalid Key Length")
rc_table = [0b00000001, 0b00000010 , 0b00000100 ,
0b00001000, 0b00010000 , 0b00100000 ,
0b01000000, 0b10000000 , 0b00011011 ,
0b00110110]
rcon_table = []
for i in range(10):
rcon_table.append([rc_table[i], 0b0, 0b0, 0b0])
Expanded_Key = []
for i in range(N):
Expanded_Key.append(key[i])
for i in range(N,4*(steps+1)):
if i % N == 0:
rotated = Expanded_Key[i-1][1:] + [Expanded_Key[i-1][0]]
Expanded_Key.append(xor_lst(xor_lst(Expanded_Key[i-N],
Sbox(rotated,box)),rcon_table[int(i/N)-1]))
elif N > 6 and i % N == 4:
Expanded_Key.append(xor_lst(Expanded_Key[i-N],Sbox(Expanded_Key[i-1],box)))
else:
Expanded_Key.append(xor_lst(Expanded_Key[i-N],Expanded_Key[i-1]))
return Expanded_Key
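# Usage note (editor's sketch): for a 16-byte key the expansion yields
# 4*(10+1) = 44 words, e.g. (MessageToMatrix is defined further below):
#   words = key_expansion(MessageToMatrix("Thats my Kung Fu"), S_box)
#   round_key_0 = words[0:4]   # identical to the original key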
# ByteSub operation on a 4x4 matrix of bytes.
def ByteSub(matrix,sub_box):
result = []
for i in range(4):
result.append(Sbox(matrix[i],sub_box))
return result
# ShiftRow operation on a 4x4 matrix of bytes.
def ShiftRow(matrix, encrypt):
if encrypt == True :
row1 = [matrix[0][0],matrix[1][1],matrix[2][2],matrix[3][3]]
row2 = [matrix[1][0],matrix[2][1],matrix[3][2],matrix[0][3]]
row3 = [matrix[2][0],matrix[3][1],matrix[0][2],matrix[1][3]]
row4 = [matrix[3][0],matrix[0][1],matrix[1][2],matrix[2][3]]
else:
row1 = [matrix[0][0],matrix[3][1],matrix[2][2],matrix[1][3]]
row2 = [matrix[1][0],matrix[0][1],matrix[3][2],matrix[2][3]]
row3 = [matrix[2][0],matrix[1][1],matrix[0][2],matrix[3][3]]
row4 = [matrix[3][0],matrix[2][1],matrix[1][2],matrix[0][3]]
result = [row1,row2,row3,row4]
return result
# Dot product with polynomial multiplication for lists of 4 bytes;
# returns an integer value (addition is also polynomial, i.e. XOR).
def Vector_poly_multi(polylst,polylst2):
result = 0
for i in range(4):
temp = poly_multiplication(polylst[i],polylst2[i])
result = result ^ temp
return result
# MixColumn operation on a 4x4 matrix of bytes.
# We have to work with columns instead of rows here, so we fetch each
# column first.
def MixColumn(matrix, mixcol):
result = [[],[],[],[]]
    # Fetch the corresponding column
for i in range(4):
col = matrix[i]
# Matrix multiplication, multiplies each time the corresponding row of
# MixCol with the column of values.
for j in range(4):
result[i].append(Vector_poly_multi(mixcol[j],col))
return result
# AddRoundKey operation on a 4x4 matrix of bytes.
def AddRoundKey(matrix,sub_key):
result = []
for i in range(4):
result.append(xor_lst(matrix[i],sub_key[i]))
return result
# Converts any message into a vector of 32-bit words, i.e. a 4xN matrix of
# bytes. In particular, this means a key will already be organised as words,
# and a 128-bit message will already be a 4x4 matrix of bytes.
# A byte here is just an integer value between 0 and 255.
def MessageToMatrix(string):
longueur = len(string)
matrice = []
for i in range(longueur):
byte = ord(string[i])
if i % 4 == 0:
matrice.append([byte])
else:
matrice[-1].append(byte)
return matrice
# Vice versa: transforms a 4xN matrix of bytes into a string.
def MatrixToMessage(matrice):
string = ""
for word in matrice:
for j in range(4):
string += chr(word[j])
return string
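# Round-trip property (editor's note): for any string whose length is a
# multiple of 4, MatrixToMessage(MessageToMatrix(s)) == s, e.g. for the
# 16-character message "Two One Nine Two".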
# FINAL AES FUNCTION :
def AES(message,key,S_box, mixcol):
message = MessageToMatrix(message)
key = MessageToMatrix(key)
All_Keys = key_expansion(key,S_box)
Init_Key = All_Keys[0:4]
# First xor
message = AddRoundKey(message,Init_Key)
N = len(key)
if N == 4:
steps = 10
elif N == 6:
steps = 12
elif N == 8 :
steps = 14
else:
raise ValueError("Invalid Key Length")
for i in range(1,steps+1):
next_key = All_Keys[4*i:4*(i+1)]
message = ByteSub(message,S_box)
message = ShiftRow(message, True)
if i < steps:
message = MixColumn(message, mixcol)
message = AddRoundKey(message, next_key)
message = MatrixToMessage(message)
return message
# AES Decryption :
def AES_Inv(message,key,S_box, S_box_Inv, mixcol):
message = MessageToMatrix(message)
key = MessageToMatrix(key)
All_Keys = key_expansion(key,S_box)
Init_Key = All_Keys[0:4]
N = len(key)
if N == 4:
steps = 10
elif N == 6:
steps = 12
elif N == 8 :
steps = 14
else:
raise ValueError("Invalid Key Length")
for i in range(steps,0,-1):
next_key = All_Keys[4*i:4*(i+1)]
message = AddRoundKey(message, next_key)
if i < steps:
message = MixColumn(message, mixcol)
message = ShiftRow(message, False)
message = ByteSub(message,S_box_Inv)
# Reverse the First xor
message = AddRoundKey(message,Init_Key)
message = MatrixToMessage(message)
return message
message = "Two One Nine Two"
key = "Thats my Kung Fu"
#print(MessageToMatrix(message))
#ciphertext = AES(message,key,S_box,MixColMatrix)
#plaintext = AES_Inv(ciphertext, key, S_box, S_box_inv, MixColInverse)
#print(message)
#print(ciphertext)
#print(plaintext)
# This only does the minimum needed for TP2: an AES_Box which uses blocks of
# 16 characters (i.e. 128 bits). To do exactly what was asked for TP1, it is
# missing the part where the message is cut into 128-bit blocks.
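# A minimal sketch of that missing step (not part of the original solution;
# the space padding and the function name below are assumptions). It pads to a
# multiple of 16 characters and encrypts each 128-bit block independently,
# i.e. ECB mode -- enough for the exercise, insecure in practice.
def AES_in_blocks(message, key):
    padded = message + ' ' * (-len(message) % 16)  # naive padding (assumption)
    return ''.join(AES(padded[i:i + 16], key, S_box, MixColMatrix)
                   for i in range(0, len(padded), 16))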
|
[
"noreply@github.com"
] |
noreply@github.com
|
1a8275ca53430b7a7de208eaccbad36efa3e3dac
|
23e9b76b5702b364bba2e4565a1ab1b6eb5d1d21
|
/4. Project/face_tracking/face_tracker.py
|
e3316b4b9d8cd1f41be0010e42835409afee7f4c
|
[
"MIT"
] |
permissive
|
gjustin40/Pytorch-Cookbook
|
5b346598019e540b05019e3fa465e9d1132c914a
|
52a7f4e8c053c775d8c6c90ab0926540b2e1f7cf
|
refs/heads/master
| 2023-02-27T19:42:49.179167
| 2023-02-14T06:22:13
| 2023-02-14T06:22:13
| 148,883,451
| 0
| 1
|
MIT
| 2022-12-09T05:49:21
| 2018-09-15T08:20:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,418
|
py
|
from imutils.video import VideoStream, FPS
from centroidtracker import CentroidTracker
import numpy as np
import argparse
import imutils
import time
import cv2
prototxt = 'deploy.prototxt'
model = 'res10_300x300_ssd_iter_140000.caffemodel'
# model = 'opencv_face_detector.caffemodel'
confidence = 0.8
ct = CentroidTracker()
(H, W) = (None, None)
print("[INFO] loading model....")
net = cv2.dnn.readNetFromCaffe(prototxt, model)
parser = argparse.ArgumentParser()
parser.add_argument('--source', required=True, help='video or camera')
args = parser.parse_args()
if args.source == '0':
print("[INFO] starting video streams...")
stream = VideoStream(src=0).start()
else:
print("[INFO] starting video Capture...")
stream = cv2.VideoCapture(args.source)
fps = FPS().start()
time.sleep(2.0)
while True:
try:
if args.source == '0':
frame = stream.read()
frame = imutils.resize(frame, width=500)
else:
(grabbed, frame) = stream.read()
frame = imutils.resize(frame, width=500)
if W is None or H is None:
(H, W) = frame.shape[:2]
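        # blobFromImage subtracts the per-channel BGR means (104, 177, 123)
        # commonly quoted for this res10 SSD face model (editor's note)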
blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
rects = []
        # detections has shape (1, 1, N, 7); each row holds
        # [batchId, classId, confidence, x1, y1, x2, y2], coords scaled to [0, 1]
for i in range(0, detections.shape[2]):
if detections[0, 0, i, 2] > confidence:
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
rects.append(box.astype("int"))
(startX, startY, endX, endY) = box.astype('int')
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
objects = ct.update(rects)
for (objectID, centroid) in objects.items():
text = f'ID {objectID}'
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] -10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (0,255,0), -1)
except Exception as e:
# print('No Objects')
print(e)
cv2.imshow("frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
fps.update()
fps.stop()
print(fps.fps())
cv2.destroyAllWindows()
# VideoStream exposes .stop(); a plain cv2.VideoCapture needs .release().
if args.source == '0':
    stream.stop()
else:
    stream.release()
|
[
"gjustin@naver.com"
] |
gjustin@naver.com
|
3d97b330f77024758577eaa3b15f7e554fac1016
|
d880f73ae1b791f73789ab51916e5eb74c6c2a23
|
/aiologger/filters.py
|
87cbba86d9771928ce63e25b5a45686b14645b7b
|
[
"MIT"
] |
permissive
|
decaz/aiologger
|
d16e651f1358416e51f84d4103fb037f39f085c8
|
94e9c126280d1ede315a6fc3531ac17c21a2c33a
|
refs/heads/master
| 2020-05-01T05:00:02.343984
| 2019-03-03T16:57:51
| 2019-03-03T16:57:51
| 177,288,933
| 0
| 0
|
MIT
| 2019-03-23T12:56:44
| 2019-03-23T12:56:43
| null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import logging
class StdoutFilter(logging.Filter):
def filter(self, record):
return record.levelno in (logging.DEBUG, logging.INFO)
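# Hedged usage sketch (not part of the original module): the intended split is
# DEBUG/INFO to stdout and WARNING and above to stderr.
if __name__ == '__main__':
    import sys
    logger = logging.getLogger('demo')
    logger.setLevel(logging.DEBUG)
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.addFilter(StdoutFilter())
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setLevel(logging.WARNING)
    logger.addHandler(stdout_handler)
    logger.addHandler(stderr_handler)
    logger.info('goes to stdout only')
    logger.error('goes to stderr only')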
|
[
"magalhaesmartins@icloud.com"
] |
magalhaesmartins@icloud.com
|
4494b119785b3c4ad00149d3407d60b1f571ca22
|
4c7e44b17782f1f1823238cf874ff298900d98ef
|
/config.py
|
c2878ccab1deee984f877ba8569500e6dc40ab83
|
[] |
no_license
|
dynamodenis/blog-arena
|
c2195262f2dec199b159e6383a2b5f14a4046799
|
10be4fffe63c061b3c67c664458b45eee00e8b95
|
refs/heads/master
| 2022-06-29T18:36:06.261840
| 2020-05-09T07:10:48
| 2020-05-09T07:10:48
| 262,506,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
class Config:
pass
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG=True
class TestConfig(Config):
pass
config_options={
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
}
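# Hedged usage sketch (assumes a Flask `app` object, which this file does not
# create): an app factory would typically do
#   app.config.from_object(config_options['development'])
# so that e.g. DevConfig.DEBUG is applied.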
|
[
"dmbugua66@gmail.com"
] |
dmbugua66@gmail.com
|
f26a1a8661049360f37f28aae77619d1416b9783
|
25d8dd91bcdb2ae02e260d81a4c6e79417ee213e
|
/hw/project/text_classification/mapreduce/knn/map.py
|
fd155d3551f0310add2108b0b3053e14538b4875
|
[] |
no_license
|
huhuk/FBDP
|
4b885c2cccda24ffea4edcaad90e4495a6870819
|
95a4d3271c95d13db9ff1f7396177e34148bc970
|
refs/heads/master
| 2021-04-25T15:37:28.086901
| 2018-02-28T07:25:50
| 2018-02-28T07:25:50
| 109,667,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
#!/usr/bin/python3
import sys
import pickle
import numpy as np
import scipy as sp
from sklearn.feature_extraction.text import TfidfVectorizer
k = 3
def load(filename):
f = open(filename, 'rb')
obj = pickle.load(f)
f.close()
return obj
# count_vec = load('./tfidf.model')
x_train = load('./x_train.model')
y_train = load('./y_train.model')
x_test = load('./x_test.model')
n = x_train.shape[0]
one = sp.sparse.csr_matrix(np.ones(n).reshape((n,1)))
def get_dist(x):
dists = sp.sparse.linalg.norm((x_train - one * x), axis=1)
return sorted(zip(dists, y_train))[:k]
def get_cl(x):
    # Majority vote over the k nearest neighbours (the logic below is
    # hard-wired to k = 3): count the label frequencies first.
    aDict = dict()
    cls = get_dist(x)
    for j, i in cls:
        if i not in aDict:
            aDict[i] = 1
        else:
            aDict[i] += 1
    if len(aDict) == 3:
        # Three distinct labels: no majority, fall back to the nearest one.
        ret = cls[0][1]
    else:
        # Otherwise return the label that occurs more than once.
        for i, j in aDict.items():
            if int(j) > 1:
                ret = i
    return ret
for i, line in enumerate(sys.stdin):
y= line.strip()
x = x_test[i]
print(y, get_cl(x))
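# Hedged usage note: as a streaming mapper this script is fed one true test
# label per line on stdin and prints "<true_label> <predicted_label>" pairs,
# e.g.
#   cat y_test_labels.txt | ./map.py
# (the input file name here is an assumption).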
|
[
"huhu_qs@163.com"
] |
huhu_qs@163.com
|
4eff2793d09f4ab95e340f10730da9548630656b
|
f348191ea2ee92f4154f9f2819dbc11ba209dc8c
|
/app.py
|
74b0f6f02805b342507aef2101c130969aa6cbb6
|
[
"MIT"
] |
permissive
|
ish-u/sparrow
|
f9be3e6bcd2df4a655fc18f4938fe7c95f2f09c6
|
18fb031a96c5443e6922c3b7d54f79cc4eb141bf
|
refs/heads/master
| 2020-12-03T12:08:21.958397
| 2020-01-05T09:58:24
| 2020-01-05T09:58:24
| 231,310,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,732
|
py
|
from hashlib import md5
import urllib, hashlib
from datetime import datetime
from flask import Flask, escape, request, render_template, redirect ,session ,flash
import sqlite3
from passlib.hash import sha256_crypt
from functools import wraps
from flask_avatars import Avatars
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'user_id' in session:
return f(*args, **kwargs)
else:
return redirect('/login')
return wrap
app = Flask(__name__)
avatars = Avatars(app)
app.secret_key = 'LOL'
@app.route('/')
@login_required
def hello():
conn = sqlite3.connect('users')
c = conn.cursor()
info = c.execute("SELECT * FROM info WHERE user=:user",{"user":session["user_id"]}).fetchone()
conn.commit()
user = session["user_id"]
    status = c.execute(f"SELECT * FROM {user}").fetchall()
    conn.close()
    return render_template("index.html", info=info, status=status)
@app.route('/register',methods=['GET', 'POST'])
def register():
if request.method == 'POST':
conn = sqlite3.connect('users')
c = conn.cursor()
u_name = request.form.get("username")
name = request.form.get("name")
password = (request.form.get("password"))
email = request.form.get("email")
dob = request.form.get("age")
age = datetime.now().year - int(dob[0:4])
if not name or not password or not email or not u_name or not dob:
flash("ONE OR MORE FEILD ARE NOT LEFT EMPTY DURING SUBMISSION")
return redirect('/register')
elif password != request.form.get("confirmation"):
flash("PASSWORDS DON'T MATCH")
return redirect('/register')
if c.execute("SELECT * FROM info WHERE user =:user",{"user":name}).fetchone() != None:
flash("USERNAME ALREADY EXISTS")
return redirect("/register")
if c.execute("SELECT * FROM info WHERE email =:email",{"email":email}).fetchone() != None:
flash("EMAIL ALREADY EXISTS")
return redirect("/register")
passw = sha256_crypt.hash(password)
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
c.execute("INSERT INTO info(user,pass,email,avatar,dob,age,name) VALUES(?,?,?,?,?,?,?)", (u_name, passw,email,avatar_hash,dob,age,name))
c.execute(f"CREATE TABLE {u_name} ('s_no' INTEGER PRIMARY KEY NOT NULL, 'post' TEXT NOT NULL, 'ddmmyy' DATETIME NOT NULL)")
conn.commit()
conn.close()
return redirect("/")
elif request.method == 'GET':
return render_template("register.html")
@app.route('/home')
def home():
return redirect("/")
@app.route('/login',methods=['POST','GET'])
def login():
if request.method == 'POST':
session.clear()
name = request.form.get("username")
password = request.form.get("password")
if not name or not password:
flash("ONE OR MORE FIELDS ARE LEFT EMPTY")
return redirect('/login')
conn = sqlite3.connect('users')
c = conn.cursor()
row = c.execute("SELECT * FROM info WHERE user = :name", {"name":name})
data = c.fetchone()
if data == None:
flash("BRU.. u have to register to login, that's how these things work i guess")
return redirect('/login')
passw = data[1]
if sha256_crypt.verify(password,passw):
session["user_id"] = data[0]
else:
flash("INCORRECT PASSWORD")
return redirect("/login")
conn.commit()
conn.close()
return redirect("/")
else:
return render_template("login.html")
@app.route('/logout',methods=['POST','GET'])
def logout():
session.clear()
return redirect('/')
@app.route('/feed' ,methods=['GET','POST'])
@login_required
def feed():
if request.method == 'POST':
status = request.form.get("status")
user = session["user_id"]
conn = sqlite3.connect('users')
c = conn.cursor()
avatar = c.execute("SELECT avatar FROM info WHERE user=:user",{"user":user}).fetchone()[0]
c.execute(f"INSERT INTO {user}(post,ddmmyy) VALUES(?,?)",(status,datetime.now()))
c.execute("INSERT INTO status(user,post,ddmmyy,avatar) VALUES(?,?,?,?)",(user,status,datetime.now(),avatar))
conn.commit()
        data = c.execute("SELECT * FROM status").fetchall()
        conn.close()
        return render_template("feed.html", data=data)
else:
conn = sqlite3.connect('users')
c = conn.cursor()
        data = c.execute("SELECT * FROM status").fetchall()
        conn.close()
        return render_template("feed.html", data=data)
@app.route('/people' ,methods=['GET','POST'])
@login_required
def people():
if request.method == 'POST':
search = request.form.get("search")
conn = sqlite3.connect('users')
c = conn.cursor()
        data = c.execute("SELECT * FROM info WHERE user= :user", {"user": search}).fetchall()
        conn.close()
        return render_template("find_people.html", data=data)
else:
conn = sqlite3.connect('users')
c = conn.cursor()
        data = c.execute("SELECT * FROM info").fetchall()
        conn.close()
        return render_template("find_people.html", data=data)
@app.route('/user' ,methods=['GET','POST'])
@login_required
def user():
if request.method == 'POST':
user = request.form.get("button")
if user == session["user_id"]:
return redirect('/')
conn = sqlite3.connect('users')
c = conn.cursor()
info = c.execute("SELECT * FROM info WHERE user=:user",{"user":user}).fetchone()
conn.commit()
        status = c.execute(f"SELECT * FROM {user}").fetchall()
        conn.close()
        return render_template("user.html", info=info, status=status)
@app.route('/status',methods=['POST','GET'])
@login_required
def redirect_status():
return render_template('status.html')
@app.route('/edit',methods=['POST','GET'])
@login_required
def edit():
status = request.form.get("edit")
conn = sqlite3.connect('users')
c = conn.cursor()
c.execute("UPDATE info SET status = :status WHERE user =:user",{"status":status,"user":session["user_id"]})
conn.commit()
conn.close()
return redirect('/')
#if __name__ == "__main__":
# app.run(debug=True)
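# Hedged note (sketch, not part of the original app): the f-string queries
# above splice the username straight into SQL. sqlite3 cannot bind table
# names, so the per-user-table design forces string formatting; a safer
# single-table design would bind every value instead, e.g.
#   c.execute("INSERT INTO posts(user, post, ddmmyy) VALUES(?, ?, ?)",
#             (user, status, datetime.now()))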
|
[
"anmolgupta520@gmail.com"
] |
anmolgupta520@gmail.com
|