| repo_name (string, length 5-100) | path (string, length 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, length 0-8.16k) | middle (string, length 3-512) | suffix (string, length 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| FreeON/spammpack | src-charm/tools/process_scaling_data.py | Python | bsd-3-clause | 16,233 | 0.030863 |
#!/usr/bin/env python
class scaling_data:
def __init__ (self, filename):
import re
self.data = []
self.N_chunk = 0
self.N_basic = 0
fd = open(filename)
for line in fd:
if self.N_chunk == 0:
result = re.compile("^running.* -N ([0-9]+) -b ([0-9]*) ").search(line)
if result:
self.N_chunk = int(result.group(1))
self.N_basic = int(result.group(2))
if re.compile("running:").search(line):
self.append()
result = re.compile("lambda = (.*)$").search(line)
if result:
self.set_lambda(float(result.group(1)))
result = re.compile("complexity ratio = (.*)$").search(line)
if result:
self.set_complexity(float(result.group(1)))
result = re.compile("done multiplying using ([0-9]*) OpenMP.*tolerance (.*), (.*) seconds").search(line)
if result:
self.set_threads(int(result.group(1)))
self.set_tolerance(float(result.group(2)))
self.set_dense(False)
self.set_walltime(float(result.group(3)))
result = re.compile("done multiplying dense using ([0-9]*) OpenMP.* (.*) seconds").search(line)
if result:
self.set_threads(int(result.group(1)))
self.set_dense(True)
self.set_tolerance(0.0)
self.set_walltime(float(result.group(2)))
fd.close()
def info (self):
info = ""
info += "N = {:d}, N_basic = {:d}\n".format(self.N_chunk, self.N_basic)
info += "complexity: " + str(self.get_complexity()) + "\n"
info += "thread: " + str(self.get_threads())
return info
def append (self):
self.data.append({})
def set_dense (self, isDense):
self.data[-1]["isDense"] = isDense
def set_lambda (self, l):
self.data[-1]["lambda"] = l
def set_complexity (self, c):
self.data[-1]["complexity"] = c
def set_threads (self, P):
self.data[-1]["threads"] = P
def set_tolerance (self, t):
self.data[-1]["tolerance"] = t
def set_walltime (self, T):
self.data[-1]["walltime"] = T
def __str__ (self):
result = ""
for i in self.data:
result += str(i) + "\n"
return result
def get_complexity (self):
c = []
for i in self.data:
if not i["complexity"] in c:
c.append(i["complexity"])
return sorted(c, reverse = True)
def get_tolerance (self):
tau = []
for i in self.data:
if not i["tolerance"] in tau:
tau.append(i["tolerance"])
return sorted(tau)
def get_threads (self):
t = []
for i in self.data:
if not i["threads"] in t:
t.append(i["threads"])
return sorted(t)
def get_record (
self,
isDense = False,
complexity = None,
threads = None,
tolerance = None):
result = []
for i in self.data:
next_result = i
if complexity and i["complexity"] != complexity:
next_result = None
if tolerance and i["tolerance"] != tolerance:
next_result = None
if threads and i["threads"] != threads:
next_result = None
if isDense != i["isDense"]:
next_result = None
if next_result:
result.append(next_result)
return result
def flatten_list (l):
while True:
temp = []
isFlat = True
for i in l:
if type(i) == type([]):
isFlat = False
for j in i:
temp.append(j)
else:
temp.append(i)
l = temp
if isFlat:
break
return l
def plot_walltime_vs_complexity (data, options):
import matplotlib.pyplot as plt
plt.figure(
figsize = (
options.width/100.0,
options.height/100.0
),
dpi = 100
)
complexity_values = data.get_complexity()
if options.thread:
thread_values = sorted([ int(i) for i in options.thread ])
else:
thread_values = data.get_threads()
max_speedup = 1
for t in thread_values:
walltime = []
for c in complexity_values:
query = data.get_record(complexity = c, threads = t)
if len(query) == 0:
raise Exception("can not find result for "
+ "complexity {:1.3f} and {:d} threads".format(c, t))
walltime.append(query[0]["walltime"])
if max_speedup < max([ walltime[0]/i for i in walltime ]):
max_speedup = max([ walltime[0]/i for i in walltime ])
plt.loglog(
complexity_values,
[ walltime[0]/i for i in walltime ],
linestyle = "-",
marker = "o",
label = "{:d} threads".format(t)
)
plt.loglog(
complexity_values,
[ 1/i for i in complexity_values ],
color = "black",
label = "ideal"
)
plt.grid(True)
plt.xlim([min(complexity_values), max(complexity_values)])
plt.ylim([1, max_speedup])
plt.gca().invert_xaxis()
plt.legend(loc = "upper left")
plt.xlabel("co
|
mplexity")
plt.ylabel("parallel speedup")
if not options.no_title:
plt.title("N = {:d}, N_basic = {:d}".format(data.N_chunk, data.N_basic))
if options.output:
plt.savefig(options.output + "_walltime_vs_complexity.png")
def plot_walltime_vs_tolerance (data, options):
import matplotlib.pyplot as plt
plt.figure(
figsize = (
options.width/100.0,
options.height/100.0
),
dpi = 100
)
tolerance_values = data.get_tolerance()
if options.thread:
thread_values = sorted([ int(i) for i in options.thread ])
else:
thread_values = data.get_threads()
max_speedup = 1
for t in thread_values:
walltime = []
for c in tolerance_values:
query = data.get_record(tolerance = c, threads = t)
if len(query) == 0:
raise Exception("can not find result for "
+ "tolerance {:e} and {:d} threads".format(c, t))
walltime.append(query[0]["walltime"])
if max_speedup < max([ walltime[0]/i for i in walltime ]):
max_speedup = max([ walltime[0]/i for i in walltime ])
plt.semilogx(
tolerance_values,
walltime,
linestyle = "-",
marker = "o",
label = "{:d} threads".format(t)
)
plt.grid(True)
plt.legend(loc = "upper right")
plt.xlabel("tolerance")
plt.ylabel("walltime [s]")
if not options.no_title:
plt.title("N = {:d}, N_basic = {:d}".format(data.N_chunk, data.N_basic))
if options.output:
plt.savefig(options.output + "_walltime_vs_tolerance.png")
def plot_walltime_vs_threads (data, options):
import matplotlib.pyplot as plt
plt.figure(
figsize = (
options.width/100.0,
options.height/100.0
),
dpi = 100
)
if options.complexity:
complexity_values = sorted(
[ float(i) for i in options.complexity ],
reverse = True
)
else:
complexity_values = data.get_complexity()
thread_values = data.get_threads()
for c in complexity_values:
walltime = []
for t in thread_values:
query = data.get_record(complexity = c, threads = t)
if len(query) == 0:
raise Exception("can not find SpAMM result for {:d} threads".format(t))
walltime.append(query[0]["walltime"])
plt.plot(
thread_values,
[ walltime[0]/i for i in walltime ],
linestyle = "-",
marker = "o",
label = "complexity {:1.3f}".format(c)
)
walltime = []
for t in thread_values:
query = data.get_record(isDense = True, threads = t)
if len(query) == 0:
raise Exception("can not find dense result for {:d} threads".format(t))
walltime.append(query[0]["walltime"])
plt.plot(
thread_values,
[ walltime[0]/i for i in walltime ],
linestyle = "-",
marker = "*",
label = "dense"
)
plt.plot(
thread_values,
thread_values,
color = "black",
label = "ideal"
)
plt.grid(True)
plt.legend(loc = "upper left")
plt.xlim([min(thread_values), max(thread_values)])
plt.ylim([min(thread_values), max(thread_values)])
plt.xlabel("threads")
plt.ylabel("parallel speedup")
if not options.no_title:
plt.title("N = {:d}, N_basic = {:d}".format(data.N_chunk, data.N_basic))
if options.output:
plt.savefig(options.output + "_walltime_vs_threads.png")
def plot_walltime (data, options):
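The snippet above defines a log parser (`scaling_data`) and several plotting helpers. A minimal driver sketch, assuming a benchmark log exists at the hypothetical path `scaling.log` and that 16-thread runs were recorded (both are assumptions, not part of the original script):

```python
# Illustrative usage of scaling_data; the file name and thread count are assumptions.
data = scaling_data("scaling.log")
print(data.info())

# SpAMM records for the largest complexity ratio at 16 threads.
records = data.get_record(complexity=data.get_complexity()[0], threads=16)
for r in records:
    print(r["tolerance"], r["walltime"])
```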
| ShaftesburySchoolDorset/PythonChallenges | algorithm_identification/1.py | Python | gpl-3.0 | 334 | 0.038922 |
#!/usr/local/bin/python3
def one(alist):
for passnum in range(len(alist)-1,0,-1):
for i in range(passnum):
if alist[i]>alist[i+1]:
temp = alist[i]
alist[i] = alist[i+1]
alist[i+1] = temp
alist = [54,26,93,17,77,31,44,55,20]
print(alist)
one(alist)
print(alist)
| terzeron/FeedMakerApplications | study/_spring/capture_item_link_title.py | Python | gpl-2.0 | 1,528 | 0.001309 |
#!/usr/bin/env python
import sys
import re
import getopt
from feed_maker_util import IO
def main():
state = 0
url_prefix = "https://spring.io"
result_list = []
num_of_recent_feeds = 1000
optlist, _ = getopt.getopt(sys.argv[1:], "n:")
for o, a in optlist:
if o == '-n':
num_of_recent_feeds = int(a)
for line in IO.read_stdin_as_line_list():
if state == 0:
if re.search(r'<h2 class="blog--title">', line):
state = 1
elif state == 1:
m = re.search(r'<a href="(?P<link>[^"]+)">(?P<title>.*)</a>', line)
if m:
link = url_prefix + m.group("link")
title = m.group("title")
if re.search(r'([Bb]ootiful ([Pp]odcast|GCP)|[Aa]vailable|[Rr]eleased|(\d+\.\d+\.\d+(.| )(M\d+|RC\d+|RELEASE)\)?$)|This [Ww]eek|now GA|goes (GA|RC\d+)|is out|SpringOne2GX|[Ww]ebinar|SR\d)', title):
state = 0
continue
title = re.sub(r'&amp;', '&', title)
state = 2
elif state == 2:
m = re.search(r'<time class=("|\')date("|\')[^>]*datetime="(?P<date>20\d+-\d+-\d+) ', line)
if m:
date = m.group("date")
title = date + " " + title
result_list.append((link, title))
state = 0
for (link, title) in result_list[:num_of_recent_feeds]:
print("%s\t%s" % (link, title))
if __name__ == "__main__":
sys.exit(main())
| yutian2011/IPProxy | proxy.py | Python | gpl-3.0 | 5,041 | 0.011902 |
# -*- coding: utf-8 -*-
from queue import Queue
from lxml import etree
import requests
import random
from settings import *
import time
import socket
from pybloom_live import BloomFilter
from settings import log
import os
from settings import REFRESH_BF
from settings import MIN_NUM
import redis
import threading
import traceback
bloom = BloomFilter(capacity=10000000, error_rate=0.001)
def get_pages(url):
try:
headers["User-Agent"] = random.choice(USER_AGENT_LIST)
r = requests.get(url,headers=headers)
if r.ok:
return r.content
else:
return None
except Exception as e:
log.error("PID:%d error:%s url:%s" % (os.getpid(),traceback.format_exc(),url))
return None
def parse_page(url, page, pattern):
page = etree.HTML(page.lower())
#page = etree.HTML(page.lower().decode('utf-8'))
ips = page.xpath(pattern["ip"])
ports = page.xpath(pattern["port"])
ty = page.xpath(pattern["type"])
if ips == None or ports == None or ty == None:
raise ValueError("current page "+str(ips)+str(ports)+str(ty))
for i in range(len(ips)):
ret = {}
str = "%s:%s"
ret["ip_port"] = str%(ips[i].text,ports[i].text)
#print(url, ret["ip_port"], ty[i].text)
if ty[i].text.find("http") >= 0:
ret["type"] = 0
elif ty[i].text.find("https") >= 0:
ret["type"] = 1
else:
log.error("PID:%d page:%s can not get proxy type" % (os.getpid(), url))
yield ret
def get_and_check(url,pattern,q):
try:
page = get_pages(url)
if page == None:
return
lists = parse_page(url, page, pattern)
for ele in lists:
is_existed = ele["ip_port"] in bloom
#log.debug("PID:%d proxy worker ip %s is_existed %d" % (os.getpid(),ele["ip_port"],is_existed))
if is_existed == False:
try:
bloom.add(ele["ip_port"])
except Exception as e:
log.error("PID:%d bloom filter error:%s ip:%s" % (os.getpid(),e,ele["ip_port"]))
#url, ip, is_http, store_cookies, use_default_cookies, check_anonymity,
ele["name"] = "global"
ele["db"] = 0
ele["url"] = TEST_URL
ele["store_cookies"] = STORE_COOKIE
ele["use_default_cookies"] = USE_DEFAULT_COOKIE
ele["check_anonymity"] = True
q.put(ele)
except Exception as e:
log.error("PID:%d parse page error %s " % (os.getpid(), traceback.format_exc()))
def worker(pattern,q):
try:
num = pattern["page_range"]
for i in range(len(pattern["url"])):
index = pattern["url"][i].find("%d")
log.debug("PID:%d url:%s" % (os.getpid(), str(pattern)))
if index == -1:
get_and_check(pattern["url"][i],pattern,q)
time.sleep(10)
continue
for j in range(1,num+1):
url = pattern["url"][i] % j
get_and_check(url,pattern,q)
time.sleep(10)
except Exception as e:
log.error("PID:%d proxy url error:%s %s " % (os.getpid(),traceback.format_exc(), str(pattern)))
def db_zcount():
r = redis.StrictRedis(REDIS_SERVER,REDIS_PORT,DB_FOR_IP, decode_responses=True)
return r.zcard("proxy:counts")
def get_proxy(q):
#bloom.clear_all()
times = 0
while True:
try:
num = db_zcount()
log.debug("PID:%d db current ips %d------" % (os.getpid(),num))
while num > MIN_NUM:
time.sleep(REFRESH_WEB_SITE_TIMEER)
times += 1
if times == REFRESH_BF:
#bloom.clear()
bloom = BloomFilter(capacity=100000, error_rate=0.001)
times = 0
log.debug("PID:%d refresh bloom filter" % os.getpid())
t1 = time.time()
threads = []
for key,value in list(URL_PATTERN.items()):
thread = threading.Thread(target=worker,args=(value,q))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
t2 = time.time()
t = REFRESH_WEB_SITE_TIMEER - (t2 - t1)
times += 1
if t > 0:
time.sleep(t)
log.debug("PID:%d proxy sleep end------" % os.getpid())
if times == REFRESH_BF:
#bloom.clear()
bloom = BloomFilter(capacity=100000, error_rate=0.001)
times = 0
log.debug("PID:%d refresh bloom filter" % os.getpid())
except Exception as e:
log.error("PID:%d proxy error:%s" % (os.getpid(), traceback.format_exc()))
if __name__ == "__main__":
q = Queue()
get_proxy(q)
#worker(URL_PATTERN[URL_LIST[0]],q)
| santiavenda2/griffith | lib/plugins/movie/PluginMovieIMDB-pt.py | Python | gpl-2.0 | 7,570 | 0.004761 |
# -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2011 Ivo Nunes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gutils
import movie
import string
import re
import urllib
plugin_name = "IMDb-pt"
plugin_description = "Internet Movie Database Portuguese"
plugin_url = "www.imdb.pt"
plugin_language = _("Portuguese")
plugin_author = "Ivo Nunes"
plugin_author_email = "<netherblood@gmail.com>"
plugin_version = "0.1"
class Plugin(movie.Movie):
def __init__(self, id):
self.encode ='iso-8859-1'
self.movie_id = id
self.url = "http://www.imdb.pt/title/tt" + str(self.movie_id)
def initialize(self):
self.page = gutils.convert_entities(self.page)
self.cast_page = self.open_page(url=self.url + '/fullcredits')
self.cast_page = gutils.convert_entities(self.cast_page)
self.plot_page = self.open_page(url=self.url + '/plotsummary')
self.plot_page = gutils.convert_entities(self.plot_page)
def get_image(self):
self.image_url = gutils.trim(self.page, u'src="http://ia.media-imdb.com/images/', u'.jpg" /></a>')
self.image_url = "http://ia.media-imdb.com/images/" + self.image_url + ".jpg"
def get_title(self):
self.title = gutils.trim(self.page, u'<title>', u' (')
self.title = self.title.encode(self.encode)
def get_o_title(self):
self.o_title = gutils.trim(self.page, u'Conhecido Como:</h5><div class="info-content">"', u'"')
self.o_title = self.o_title.encode(self.encode)
def get_director(self):
self.director = gutils.trim(self.page, u'<h5>Diretor:</h5>', u'</a><br/>')
self.director = gutils.strip_tags(self.director)
def get_plot(self):
self.plot = gutils.trim(self.plot_page, u'<div id="swiki.2.1">', u'</div>')
self.plot = gutils.strip_tags(self.plot)
self.plot = self.plot.encode(self.encode)
def get_year(self):
self.year = gutils.trim(self.page, u' (', u')</title>')
def get_runtime(self):
self.runtime = gutils.trim(self.page, u'<h5>Duração:</h5><div class="info-content">', u' min')
self.runtime = self.runtime.encode(self.encode)
def get_genre(self):
self.genre = gutils.trim(self.page, u'<h5>Gênero:</h5>', u'</div>')
self.genre = gutils.strip_tags(self.genre)
self.genre = string.replace(self.genre, " | ", ", ")
self.genre = self.genre.encode(self.encode)
def get_cast(self):
self.cast = ''
self.cast = gutils.trim(self.cast_page, '<table class="cast">', '</table>')
if self.cast == '':
self.cast = gutils.trim(self.page, '<table class="cast">', '</table>')
self.cast = string.replace(self.cast, ' ... ', _(' como ').encode('utf8'))
self.cast = string.replace(self.cast, '...', _(' como ').encode('utf8'))
self.cast = string.replace(self.cast, '</tr><tr>', "\n")
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="even">', "\n", self.cast)
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="odd">', "\n", self.cast)
self.cast = self.__before_more(self.cast)
self.cast = re.sub('[ ]+', ' ', self.cast)
def get_classification(self):
self.classification = gutils.trim(self.page, u'<h5>Certificação:</h5><div class="info-content">', u'</div>')
self.classification = gutils.strip_tags(self.classification)
self.classification = string.replace(self.classification, " | ", ", ")
self.classification = self.classification.encode(self.encode)
def get_studio(self):
self.studio = gutils.trim(self.page, u'<h5>Companhia :</h5><div class="info-content">', u'Exibir mais</a>')
self.studio = gutils.strip_tags(self.studio)
self.studio = self.studio.encode(self.encode)
def get_o_site(self):
self.o_site = ""
def get_site(self):
self.site = self.url
def get_trailer(self):
self.trailer = "http://www.imdb.com/title/" + str(self.movie_id) + "/trailers"
def get_country(self):
self.country = gutils.trim(self.page, u'<h5>País:</h5><div class="info-content">', '</div>')
self.country = string.replace(self.country, " | ", ", ")
self.country = self.country.encode(self.encode)
def get_notes(self):
self.notes = ''
def get_rating(self):
self.rating = gutils.trim(self.page, u'<div class="starbar-meta">', '/10')
self.rating = gutils.strip_tags(self.rating)
self.rating = string.replace(self.rating, ",", ".")
if self.rating:
self.rating = float(self.rating)
self.rating = round(self.rating)
def get_screenplay(self):
self.screenplay = ''
parts = re.split('<a href=', gutils.trim(self.cast_page, u'>Créditos como roteirista<', '</table>'))
if len(parts) > 1:
for part in parts[1:]:
screenplay = gutils.trim(part, '>', '<')
if screenplay == 'WGA':
continue
screenplay = screenplay.replace(' (escrito por)', '')
screenplay = screenplay.replace(' and<', '<')
self.screenplay = self.screenplay + screenplay + ', '
if len(self.screenplay) > 2:
self.screenplay = self.screenplay[0:len(self.screenplay) - 2]
def get_cameraman(self):
self.cameraman = string.replace('<' + gutils.trim(self.cast_page, u'>Direção de Fotografia de<', '</table>'), u'(diretor de fotografia) ', '')
def __before_more(self, data):
for element in [u'>Exibir mais<', '>Full summary<', '>Full synopsis<']:
tmp = string.find(data, element)
if tmp>0:
data = data[:tmp] + '>'
return data
class SearchPlugin(movie.SearchMovie):
PATTERN = re.compile(r"""<a href=['"]/title/tt([0-9]+)/[^>]+[>](.*?)</td>""")
def __init__(self):
self.original_url_search = 'http://www.imdb.pt/find?s=tt&q='
self.translated_url_search = 'http://www.imdb.pt/find?s=tt&q='
self.encode = 'utf8'
def search(self, parent_window):
"""Perform the web search"""
if not self.open_search(parent_window):
return None
return self.page
def get_searches(self):
"""Try to find both id and film title for each search result"""
elements = string.split(self.page, '<tr')
if len(elements):
for element in elements[1:]:
match = self.PATTERN.findall(element)
if len(match) > 1:
tmp = re.sub('^[0-9]+[.]', '', gutils.clean(match[1][1]))
self.ids.append(match[1][0])
self.titles.append(tmp)
| RTHMaK/RPGOne | deep_qa-master/deep_qa/models/__init__.py | Python | apache-2.0 | 3,009 | 0.00432 |
from .entailment.decomposable_attention import DecomposableAttention
from .memory_networks.differentiable_search import DifferentiableSearchMemoryNetwork
from .memory_networks.memory_network import MemoryNetwork
from .memory_networks.softmax_memory_network import SoftmaxMemoryNetwork
from .multiple_choice_qa.decomposable_attention import MultipleTrueFalseDecomposableAttention
from .multiple_choice_qa.tuple_inference import TupleInferenceModel
from .multiple_choice_qa.multiple_true_false_memory_network import MultipleTrueFalseMemoryNetwork
from .multiple_choice_qa.multiple_true_false_similarity import MultipleTrueFalseSimilarity
from .multiple_choice_qa.question_answer_memory_network import QuestionAnswerMemoryNetwork
from .multiple_choice_qa.question_answer_similarity import QuestionAnswerSimilarity
from .multiple_choice_qa.tuple_entailment import MultipleChoiceTupleEntailmentModel
from .sentence_selection.siamese_sentence_selector import SiameseSentenceSelector
from .sequence_tagging.simple_tagger import SimpleTagger
from .reading_comprehension.attention_sum_reader import AttentionSumReader
from .reading_comprehension.gated_attention_reader import GatedAttentionReader
from .reading_comprehension.bidirectional_attention import BidirectionalAttentionFlow
from .text_classification.true_false_model import TrueFalseModel
from ..training import concrete_pretrainers
from .memory_networks.pretrainers.attention_pretrainer import AttentionPretrainer
from .memory_networks.pretrainers.snli_pretrainer import SnliAttentionPretrainer, SnliEntailmentPretrainer
from .text_pretrainers.encoder_pretrainer import EncoderPretrainer
concrete_models = { # pylint: disable=invalid-name
'AttentionSumReader': AttentionSumReader,
'BidirectionalAttentionFlow': BidirectionalAttentionFlow,
'DecomposableAttention': DecomposableAttention,
'DifferentiableSearchMemoryNetwork': DifferentiableSearchMemoryNetwork,
'GatedAttentionReader': GatedAttentionReader,
'MemoryNetwork': MemoryNetwork,
'MultipleTrueFalseDecomposableAttention': MultipleTrueFalseDecomposableAttention,
'MultipleTrueFalseMemoryNetwork': MultipleTrueFalseMemoryNetwork,
'MultipleTrueFalseSimilarity': MultipleTrueFalseSimilarity,
'MultipleChoiceTupleEntailmentModel': MultipleChoiceTupleEntailmentModel,
'QuestionAnswerMemoryNetwork': QuestionAnswerMemoryNetwork,
'QuestionAnswerSimilarity': QuestionAnswerSimilarity,
'SiameseSentenceSelector': SiameseSentenceSelector,
'SimpleTagger': SimpleTagger,
'SoftmaxMemoryNetwork': SoftmaxMemoryNetwork,
'TrueFalseModel': TrueFalseModel,
'TupleInferenceModel': TupleInferenceModel
}
concrete_pretrainers['AttentionPretrainer'] = AttentionPretrainer
concrete_pretrainers['SnliAttentionPretrainer'] = SnliAttentionPretrainer
concrete_pretrainers['SnliEntailmentPretrainer'] = SnliEntailmentPretrainer
concrete_pretrainers['EncoderPretrainer'] = EncoderPretrainer
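The registry above maps string names to model classes. A minimal lookup sketch, assuming a hypothetical experiment config dict (not part of the original module):

```python
# Illustrative only: resolve a model class by name from concrete_models.
params = {"model_class": "SimpleTagger"}  # hypothetical config
model_class = concrete_models[params.pop("model_class")]
print(model_class.__name__)  # -> SimpleTagger
```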
| jmesteve/saas3 | openerp/addons_extra/l10n_es_fiscal_year_closing/wizard/__init__.py | Python | agpl-3.0 | 1,141 | 0.001753 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Spanish Fiscal Year Closing Wizards
"""
import wizard_run
| smallyear/linuxLearn | salt/salt/utils/openstack/neutron.py | Python | apache-2.0 | 33,523 | 0.00006 |
# -*- coding: utf-8 -*-
'''
Neutron class
'''
# Import python libs
from __future__ import absolute_import, with_statement
import logging
# Import third party libs
import salt.ext.six as six
# pylint: disable=import-error
HAS_NEUTRON = False
try:
from neutronclient.v2_0 import client
from neutronclient.shell import NeutronShell
HAS_NEUTRON = True
except ImportError:
pass
# pylint: enable=import-error
# Import salt libs
from salt import exceptions
# Get logging started
log = logging.getLogger(__name__)
def check_neutron():
return HAS_NEUTRON
def sanitize_neutronclient(kwargs):
variables = (
'username', 'user_id', 'password', 'token', 'tenant_name',
'tenant_id', 'auth_url', 'service_type', 'endpoint_type',
'region_name', 'endpoint_url', 'timeout', 'insecure',
'ca_cert', 'retries', 'raise_error', 'session', 'auth'
)
ret = {}
for var in six.iterkeys(kwargs):
if var in variables:
ret[var] = kwargs[var]
return ret
# Function alias to not shadow built-ins
class SaltNeutron(NeutronShell):
'''
Class for all neutronclient functions
'''
def __init__(self, username, tenant_name, auth_url, password=None,
region_name=None, service_type=None, **kwargs):
'''
Set up neutron credentials
'''
if not HAS_NEUTRON:
return None
self.kwargs = kwargs.copy()
self.kwargs['username'] = username
self.kwargs['tenant_name'] = tenant_name
self.kwargs['auth_url'] = auth_url
self.kwargs['service_type'] = service_type
self.kwargs['password'] = password
self.kwargs['region_name'] = region_name
self.kwargs = sanitize_neutronclient(self.kwargs)
self.network_conn = client.Client(**self.kwargs)
@staticmethod
def _fetch(resources, name_or_id):
ret = []
for resource in resources:
if resource['id'] == name_or_id:
return resource
if resource.get('name') == name_or_id:
ret.append(resource)
if len(ret) == 0:
raise exceptions.MinionError("Resource not found.")
elif len(ret) >= 2:
raise exceptions.MinionError("Multiple resource matches f
|
ound.")
else:
return ret[0]
def _find_port_id(self, resource):
resource = self._fetch_port(resource)
return resource['id']
def _find_network_id(self, resource):
resource = self._fetch_network(resource)
return resource['id']
def _find_subnet_id(self, resource):
resource = self._fetch_subnet(resource)
return resource['id']
def _find_router_id(self, resource):
resource = self._fetch_router(resource)
return resource['id']
def _find_security_group_id(self, resource):
resource = self._fetch_security_group(resource)
return resource['id']
def _find_vpnservice_id(self, resource):
resource = self._fetch_vpnservice(resource)
return resource['id']
def _find_ipsec_site_connection_id(self, resource):
resource = self._fetch_ipsec_site_connection(resource)
return resource['id']
def _find_ikepolicy_id(self, resource):
resource = self._fetch_ikepolicy(resource)
return resource['id']
def _find_ipsecpolicy_id(self, resource):
resource = self._fetch_ipsecpolicy(resource)
return resource['id']
def _fetch_port(self, name_or_id):
resources = self.list_ports()['ports']
return self._fetch(resources, name_or_id)
def _fetch_network(self, name_or_id):
resources = self.list_networks()['networks']
return self._fetch(resources, name_or_id)
def _fetch_subnet(self, name_or_id):
resources = self.list_subnets()['subnets']
return self._fetch(resources, name_or_id)
def _fetch_router(self, name_or_id):
resources = self.list_routers()['routers']
return self._fetch(resources, name_or_id)
def _fetch_security_group(self, name_or_id):
resources = self.list_security_groups()['security_groups']
return self._fetch(resources, name_or_id)
def _fetch_vpnservice(self, name_or_id):
resources = self.list_vpnservices()['vpnservices']
return self._fetch(resources, name_or_id)
def _fetch_ipsec_site_connection(self, name_or_id):
resources = (self.list_ipsec_site_connections()
['ipsec_site_connections'])
return self._fetch(resources, name_or_id)
def _fetch_ikepolicy(self, name_or_id):
resources = self.list_ikepolicies()['ikepolicies']
return self._fetch(resources, name_or_id)
def _fetch_ipsecpolicy(self, name_or_id):
resources = self.list_ipsecpolicies()['ipsecpolicies']
return self._fetch(resources, name_or_id)
def get_quotas_tenant(self):
'''
Fetches tenant info in server's context
for following quota operation
'''
return self.get_quotas_tenant()
def list_quotas(self):
'''
Fetches all tenants quotas
'''
return self.network_conn.list_quotas()
def show_quota(self, tenant_id):
'''
Fetches information of a certain tenant's quotas
'''
return self.network_conn.show_quota(tenant_id=tenant_id)
def update_quota(self, tenant_id, subnet=None, router=None,
network=None, floatingip=None, port=None,
sec_grp=None, sec_grp_rule=None):
'''
Update a tenant's quota
'''
body = {}
if subnet:
body['subnet'] = subnet
if router:
body['router'] = router
if network:
body['network'] = network
if floatingip:
body['floatingip'] = floatingip
if port:
body['port'] = port
if sec_grp:
body['security_group'] = sec_grp
if sec_grp_rule:
body['security_group_rule'] = sec_grp_rule
return self.network_conn.update_quota(tenant_id=tenant_id,
body={'quota': body})
def delete_quota(self, tenant_id):
'''
Delete the specified tenant's quota value
'''
ret = self.network_conn.delete_quota(tenant_id=tenant_id)
return ret if ret else True
def list_extensions(self):
'''
Fetches a list of all extensions on server side
'''
return self.network_conn.list_extensions()
def list_ports(self):
'''
Fetches a list of all ports for a tenant
'''
return self.network_conn.list_ports()
def show_port(self, port):
'''
Fetches information of a certain port
'''
return self._fetch_port(port)
def create_port(self, name, network, device_id=None, admin_state_up=True):
'''
Creates a new port
'''
net_id = self._find_network_id(network)
body = {'admin_state_up': admin_state_up,
'name': name,
'network_id': net_id}
if device_id:
body['device_id'] = device_id
return self.network_conn.create_port(body={'port': body})
def update_port(self, port, name, admin_state_up=True):
'''
Updates a port
'''
port_id = self._find_port_id(port)
body = {'name': name,
'admin_state_up': admin_state_up}
return self.network_conn.update_port(port=port_id,
body={'port': body})
def delete_port(self, port):
'''
Deletes the specified port
'''
port_id = self._find_port_id(port)
ret = self.network_conn.delete_port(port=port_id)
return ret if ret else True
def list_networks(self):
'''
Fetches a list of all networks for a tenant
'''
return self.network_conn.list_networks()
def show_network(self, network):
'''
Fetches information of a certain network
'''
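The class above wraps neutronclient with name-to-id resolution. A usage sketch, assuming reachable Keystone/Neutron endpoints; the credentials, network name, and tenant id below are placeholders, not values from the original module:

```python
# Illustrative only: all connection details and names here are assumptions.
conn = SaltNeutron('admin', 'demo', 'http://controller:5000/v2.0',
                   password='secret', region_name='RegionOne')
print(conn.list_networks())                       # all networks visible to the tenant
port = conn.create_port('demo-port', 'private')   # 'private' is resolved to its UUID
conn.update_quota('tenant-uuid', port=50)         # raise the port quota
```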
| avih/treeherder | treeherder/perfalert/perfalert/__init__.py | Python | mpl-2.0 | 5,173 | 0.000773 |
def analyze(data, weight_fn=None):
"""Returns the average and sample variance (s**2) of a list of floats.
`weight_fn` is a function that takes a list index and a window width, and
returns a weight that is used to calculate a weighted average. For example,
see `default_weights` or `linear_weights` below. If no function is passed,
`default_weights` is used and the average will be uniformly weighted.
"""
if weight_fn is None:
weight_fn = default_weights
n = len(data)
weights = [weight_fn(i, n) for i in range(n)]
weighted_sum = sum(data[i] * weights[i] for i in range(n))
weighted_avg = weighted_sum / sum(weights) if n > 0 else 0.0
variance = (sum(pow(d-weighted_avg, 2) for d in data) / (n-1)) if n > 1 else 0.0
return {"avg": weighted_avg, "n": n, "variance": variance}
def default_weights(i, n):
"""A window function that weights all points uniformly."""
return 1.0
def linear_weights(i, n):
"""A window function that falls off arithmetically.
This is used to calculate a weighted moving average (WMA) that gives higher
weight to changes near the point being analyzed, and smooth out changes at
the opposite edge of the moving window. See bug 879903 for details.
"""
if i >= n:
return 0.0
return float(n - i) / float(n)
def calc_t(w1, w2, weight_fn=None):
"""Perform a Students t-test on the two lists of data.
See the analyze() function for a description of the `weight_fn` argument.
"""
if len(w1) == 0 or len(w2) == 0:
return 0
s1 = analyze(w1, weight_fn)
s2 = analyze(w2, weight_fn)
delta_s = s2['avg'] - s1['avg']
if delta_s == 0:
return 0
if s1['variance'] == 0 and s2['variance'] == 0:
return float('inf')
return delta_s / (((s1['variance'] / s1['n']) + (s2['variance'] / s2['n'])) ** 0.5)
class Datum(object):
def __init__(self, push_timestamp, value, testrun_timestamp=None,
testrun_id=None, revision_id=None, state='good'):
# Date code was pushed
self.push_timestamp = push_timestamp
# Value of this point
self.value = value
# Which test run was this
self.testrun_id = testrun_id
# What revision this data is for
self.revision_id = revision_id
# t-test score
self.t = 0
# Whether a perf regression is found
self.state = state
def __cmp__(self, o):
# only compare value to make sorting deterministic
# in cases where we have multiple datapoints with
# the same push timestamp
return cmp(
(self.push_timestamp, self.testrun_id, self.value),
(o.push_timestamp, o.testrun_id, o.value),
)
def __repr__(self):
return "<%s: %s, %s, %.3f, %.3f, %s>" % (self.push_timestamp,
self.revision_id,
self.testrun_id, self.value,
self.t, self.state)
class Analyzer:
def __init__(self):
self.data = [] # List of PerfDatum instances
def add_data(self, push_timestamp, value, **kwargs):
self.data.append(Datum(push_timestamp, value, **kwargs))
self.data.sort()
def analyze_t(self, back_window=12, fore_window=12, t_threshold=7):
# Use T-Tests
# Analyze test data using T-Tests, comparing data[i-j:i] to data[i:i+k]
(j, k) = (back_window, fore_window)
good_data = []
num_points = len(self.data) - k + 1
for i in range(num_points):
di = self.data[i]
jw = [d.value for d in good_data[-j:]]
kw = [d.value for d in self.data[i:i+k]]
# Reverse the backward data so that the current point is at the
# start of the window.
jw.reverse()
di.historical_stats = analyze(jw)
di.forward_stats = analyze(kw)
if len(jw) >= j:
di.t = abs(calc_t(jw, kw, linear_weights))
else:
# Assume it's ok, we don't have enough data
di.t = 0
good_data.append(di)
# Now that the t-test scores are calculated, go back through the data to
# find where regressions most likely happened.
for i in range(1, len(good_data) - 1):
di = good_data[i]
if di.t <= t_threshold:
continue
# Check the adjacent points
prev = good_data[i-1]
if prev.t > di.t:
continue
next = good_data[i+1]
if next.t > di.t:
continue
# This datapoint has a t value higher than the threshold and higher
# than either neighbor. Mark it as the cause of a regression.
di.state = 'regression'
# Return all but the first and last points whose scores we calculated,
# since we can only produce a final decision for a point whose scores
# were compared to both of its neighbors.
return self.data[1:num_points-1]
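A worked example of the analyzer above on synthetic data with a step change; the series, window sizes, and threshold are illustrative, and the code is Python 2-era (note `__cmp__`/`cmp`):

```python
# Illustrative only: a synthetic series that jumps from 10.0 to 15.0 at index 20.
a = Analyzer()
for ts, v in enumerate([10.0] * 20 + [15.0] * 20):
    a.add_data(ts, v, testrun_id=ts, revision_id=ts)
for d in a.analyze_t(back_window=12, fore_window=12, t_threshold=7):
    if d.state == 'regression':
        print("possible regression at push %s" % d.push_timestamp)
```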
| Yaco-Sistemas/django-autoreports | autoreports/urls.py | Python | lgpl-3.0 | 1,686 | 0.021945 |
# Copyright (c) 2010 by Yaco Sistemas <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('autoreports.views',
url(r'^ajax/fields/tree/$', 'reports_ajax_fields', name='reports_ajax_fields'),
url(r'^ajax/fields/options/$', 'reports_ajax_fields_options', name='reports_ajax_fields_options'),
url(r'^(category/(?P<category_key>[\w-]+)/)?$', 'reports_list', name='reports_list'),
url(r'^(?P<registry_key>[\w-]+)/$', 'reports_api', name='reports_api'),
url(r'^(?P<registry_key>[\w-]+)/(?P<report_id>\d+)/$', 'reports_api', name='reports_api'),
url(r'^(?P<registry_key>[\w-]+)/reports/$', 'reports_api_list', name='reports_api_list'),
url(r'^(?P<registry_key>[\w-]+)/wizard/$', 'reports_api_wizard', name='reports_api_wizard'),
url(r'^(?P<registry_key>[\w-]+)/wizard/(?P<report_id>\d+)/$', 'reports_api_wizard', name='reports_api_wizard'),
url(r'^(?P<app_name>[\w-]+)/(?P<model_name>[\w-]+)/$', 'reports_view', name='reports_view'),
)
| PUNCH-Cyber/stoq-plugins-public | mongodb/setup.py | Python | apache-2.0 | 364 | 0 |
from setuptools import setup, find_packages
setup(
name="mongo
|
db",
version="3.0.1",
author="Marcus LaFerrera (@mlaferrera)",
url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
license="Apache License 2.0",
description="Save results and archive payloads using MongoDB",
packages=find_packages(),
include_package_data=True,
)
| ggilestro/majordomo | actors/__init__.py | Python | mit | 42 | 0 |
# -*- coding: utf-8 -*-
__all__ = ["mpd
|
"]
| MichaelDoyle/Diamond | src/collectors/mysqlstat/mysql55.py | Python | mit | 8,223 | 0 |
# coding=utf-8
"""
Diamond collector that monitors relevant MySQL performance_schema values
For now only monitors replication load
[Blog](http://bit.ly/PbSkbN) announcement.
[Snippet](http://bit.ly/SHwYhT) to build example graph.
#### Dependencies
* MySQLdb
* MySQL 5.5.3+
"""
from __future__ import division
try:
import MySQLdb
from MySQLdb import MySQLError
except ImportError:
MySQLdb = None
import diamond
import time
import re
class MySQLPerfCollector(diamond.collector.Collector):
def process_config(self):
super(MySQLPerfCollector, self).process_config()
self.db = None
self.last_wait_count = {}
self.last_wait_sum = {}
self.last_timestamp = {}
self.last_data = {}
self.monitors = {
'slave_sql': {
'wait/synch/cond/sql/MYSQL_RELAY_LOG::update_cond':
'wait_for_update',
'wait/io/file/innodb/innodb_data_file':
'innodb_data_file',
'wait/io/file/innodb/innodb_log_file':
'innodb_log_file',
'wait/io/file/myisam/dfile':
'myisam_dfile',
'wait/io/file/myisam/kfile':
'myisam_kfile',
'wait/io/file/sql/binlog':
'binlog',
'wait/io/file/sql/relay_log_info':
'relaylog_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex/innodb':
'innodb_mutex',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
},
'slave_io': {
'wait/io/file/sql/relaylog_index':
'relaylog_index',
'wait/synch/mutex/sql/MYSQL_RELAY_LOG::LOCK_index':
'relaylog_index_lock',
'wait/synch/mutex/sql/Master_info::data_lock':
'master_info_lock',
'wait/synch/mutex/mysys/IO_CACHE::append_buffer_lock':
'append_buffer_lock',
'wait/synch/mutex/sql/LOG::LOCK_log':
'log_lock',
'wait/io/file/sql/master_info':
'master_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
}
}
if self.config['hosts'].__class__.__name__ != 'list':
self.config['hosts'] = [self.config['hosts']]
# Move legacy config format to new format
if 'host' in self.config:
hoststr = "%s:%s@%s:%s/%s" % (
self.config['user'],
self.config['passwd'],
self.config['host'],
self.config['port'],
self.config['db'],
)
self.config['hosts'].append(hoststr)
def get_default_config_help(self):
config_help = super(MySQLPerfCollector, self).get_default_config_help()
config_help.update({
'hosts': 'List of hosts to collect from. Format is ' +
'yourusername:yourpassword@host:' +
'port/performance_schema[/nickname]',
'slave': 'Collect Slave Replication Metrics',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLPerfCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
'slave': 'False',
})
return config
def connect(self, params):
if MySQLdb is None:
self.log.error('Unable to import MySQLdb')
return
try:
self.db = MySQLdb.connect(**params)
except MySQLError as e:
self.log.error('MySQLPerfCollector couldnt connect to database %s',
e)
return {}
self.log.debug('MySQLPerfCollector: Connected to database.')
def query_list(self, query, params):
cursor = self.db.cursor()
cursor.execute(query, params)
return list(cursor.fetchall())
def slave_load(self, nickname, thread):
data = self.query_list("""
SELECT
his.event_name,
his.sum_timer_wait,
his.count_star,
cur.event_name,
UNIX_TIMESTAMP(SYSDATE())
FROM
events_waits_summary_by_thread_by_event_name his
JOIN threads thr USING (thread_id)
JOIN events_waits_current cur USING (thread_id)
WHERE
name = %s
ORDER BY
his.event_name
""", (thread,))
wait_sum = sum([x[1] for x in data])
wait_count = sum([x[2] for x in data])
timestamp = int(time.time())
if 0 in data and len(data[0]) > 5:
cur_event_name, timestamp = data[0][3:]
if thread not in self.last_wait_sum:
# Avoid bogus data
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
return
wait_delta = wait_sum - self.last_wait_sum[thread]
time_delta = (timestamp - self.last_timestamp[thread]) * 1000000000000
if time_delta == 0:
return
# Summarize a few things
thread_name = thread[thread.rfind('/') + 1:]
data.append(
['wait/synch/mutex/innodb',
sum([x[1] for x in data
if x[0].startswith('wait/synch/mutex/innodb')])])
data.append(
['wait/synch/mutex',
sum([x[1] for x in data
if (x[0].startswith('wait/synch/mutex') and
x[0] not in self.monitors[thread_name])]) - data[-1][1]])
data.append(
['wait/synch/rwlock',
sum([x[1] for x in data
if x[0].startswith('wait/synch/rwlock')])])
data.append(
['wait/io',
sum([x[1] for x in data
if (x[0].startswith('wait/io') and
x[0] not in self.monitors[thread_name])])])
for d in zip(self.last_data[thread], data):
if d[0][0] in self.monitors[thread_name]:
self.publish(nickname + thread_name + '.' +
self.monitors[thread_name][d[0][0]],
(d[1][1] - d[0][1]) / time_delta * 100)
# Also log what's unaccounted for. This is where Actual Work gets done
self.publish(nickname + thread_name + '.other_work',
float(time_delta - wait_delta) / time_delta * 100)
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
def collect(self):
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host)
if not matches:
continue
params = {'host': matches.group(3)}
try:
params['port'] = int(matches.group(4))
except ValueError:
params['port'] = 3306
params['db'] = matches.group(5)
params['user'] = matches.group(1)
params['passwd'] = matches.group(2)
nickname = matches.group(6)
if len(nickname):
nickname += '.'
self.connect(params=params)
if self.config['slave']:
self.slave_load(nickname, 'thread/sql/slave_io')
self.slave_load(nickname, 'thread/s
| yochow/autotest | client/bin/harness.py | Python | gpl-2.0 | 2,286 | 0.006124 |
"""The harness interface
The interface between the client and the server when hosted.
"""
__author__ = """Copyright Andy Whitcroft 2006"""
import os, sys
import common
class harness(object):
"""The NULL server harness
Properties:
job
The job object for this job
"""
def __init__(self, job):
"""
job
The job object for this job
"""
self.setup(job)
def setup(self, job):
"""
job
The job object for this job
"""
self.job = job
configd = os.path.join(os.environ['AUTODIR'], 'configs')
if os.path.isdir(configd):
(name, dirs, files) = os.walk(configd).next()
job.config_set('kernel.default_config_set',
[ configd + '/' ] + files)
def run_start(self):
"""A run within this job is starting"""
pass
def run_pause(self):
"""A run within this job is completing (expect continue)"""
pass
def run_reboot(self):
"""A run within this job is performing a reboot
(expect continue following reboot)
"""
pass
def run_abort(self):
"""A run within this job is aborting. It all went wrong"""
pass
def run_complete(self):
"""A run within this job is completing (all done)"""
pass
def run_test_complete(self):
"""A test run by this job is complete. Note that if multiple
tests are run in parallel, this will only be called when all
of the parallel runs complete."""
pass
def test_status(self, status, tag):
"""A test within this job is completing"""
pass
def test_status_detail(self, code, subdir, operation, status, tag):
"""A test within this job is completing (detail)"""
pass
def select(which, job):
if not which:
which = 'standalone'
harness_name = 'harness_%s' % which
harness_module = common.setup_modules.import_module(harness_name,
'autotest_lib.client.bin')
harness_instance = getattr(harness_module, harness_name)(job)
return harness_instance
| ksh/gpirecertification | controllers/assessments.py | Python | apache-2.0 | 11,743 | 0.007153 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import logging
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import ValidStudent
from models.models import Profile
from models.models import StudentAnswersEntity
from tools import verify
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from google.appengine.ext import db
# questions per module - training 2 - 12 modules
# last is postcourse
# training
#MODULE_QUESTIONS = [4,10,7,5,5,5,5,7,5,5,5,11,7]
# recertification
MODULE_QUESTIONS = [2,4,5,4,3,7]
# mandatory modules 1 to 8 - needed?
#MANDATORY_MODULES = 8
# number of question modules
#MAX_MODULES = 6
MAX_MODULES = len(MODULE_QUESTIONS)-1
def calc_total_score(student):
#
mn = MODULE_QUESTIONS
# mm = MANDATORY_MODULES
#
overall_score = -1
ms = []
for i in range(1,MAX_MODULES+1):
course = 'a'+str(i)+'course'
ms.append(utils.get_score(student, course))
# get profile for this user - mandatory modules
valid = ValidStudent.get_valid(student.key().name())
prof = Profile.get_by_key_name(valid.profile)
auth = eval(prof.auth)
# complete = mandatory modules are done (have scores)
complete = True
i = 0
for score in ms[:MAX_MODULES]:
if auth[i]:
complete = complete and (score <> None)
i += 1
# compute overall score after mandatory modules are done
if complete:
part_score = 0
tq = 0
for i in range(MAX_MODULES):
if ms[i] <> None:
part_score += mn[i] * ms[i]
tq += mn[i]
# todo - add 0.5 before taking the int?
overall_score = int((part_score/tq)+0.5)
return overall_score
def store_score(course, student, assessment_name, assessment_type,score):
"""Stores a student's score on a particular assessment.
Args:
course: the course containing the assessment.
student: the student whose data is stored.
assessment_type: the type of the assessment.
score: the student's score on this assessment.
Returns:
the result of the assessment, if appropriate.
"""
# FIXME: Course creators can edit this code to implement custom
# assessment scoring and storage behavior
# TODO(pgbovine): Note that the latest version of answers are always saved,
# but scores are only saved if they're higher than the previous attempt.
# This can lead to unexpected analytics behavior. Resolve this.
existing_score = course.get_score(student, assessment_name)
# remember to cast to int for comparison
# logging.error('assessment name : %s exist score : %s score %s ',assessment_name,existing_score, score)
if assessment_name != 'postcourse':
if (existing_score is None) or (score > int(existing_score)):
utils.set_score(student, assessment_name, score)
# special handling for computing final score:
if assessment_name == 'postcourse':
# midcourse_score = utils.get_score(student, 'midcourse')
# if midcourse_score is None:
# midcourse_score = 0
# else:
# midcourse_score = int(midcourse_score)
if existing_score is None:
postcourse_score = score
else:
postcourse_score = int(existing_score)
if score > postcourse_score:
postcourse_score = score
# Calculate overall score based on a formula
overall_score = calc_total_score(student)
# logging.error('overall_score : %s ', overall_score)
# if utils.get_score(student, 'postcourse') == 0 and (overall_score > -1) :
# utils.set_score(student, 'postcourse', overall_score)
# utils.set_score(student, 'overall_score', overall_score)
# TODO(pgbovine): this changing of assessment_type is ugly ...
if overall_score == 100:
assessment_name = 'postcourse_100'
else:
if overall_score >= 90:
assessment_name = 'postcourse_pass'
else:
if overall_score > 0:
assessment_name = 'postcourse_fail'
else:
assessment_name = 'not_complete'
# utils.set_score(student, 'overall_score', overall_score)
# store the overall_score of the first run of training in post_course
# post_s= utils.get_score(student, 'postcourse')
# logging.error('postcourse : %s ', utils.get_score(student, 'postcourse'))
if utils.get_score(student, 'postcourse') == None and (overall_score > -1):
utils.set_score(student, 'postcourse', overall_score)
utils.set_score(student, 'overall_score', overall_score)
over_s= utils.get_score(student, 'overall_score')
if over_s <> None:
overall_score = calc_total_score(student)
utils.set_score(student, 'overall_score', overall_score)
return assessment_name
class AnswerHandler(BaseHandler):
"""Handler for saving assessment answers."""
# Find student entity and save answers
@db.transactional(xg=True)
def update_assessment_transaction(
self, email, assessment_name,assessment_type,new_answers, score):
"""Stores answer and updates user scores.
Args:
email: the student's email address.
assessment_type: the type of the assessment (as stated in unit.csv).
new_answers: the latest set of answers supplied by the student.
score: the numerical assessment score.
Returns:
the student instance.
"""
student = Student.get_enrolled_student_by_email(email)
course = self.get_course()
# It may be that old Student entities don't have user_id set; fix it.
if not student.user_id:
student.user_id = self.get_user().user_id()
answers = StudentAnswersEntity.get_by_key_name(student.user_id)
if not answers:
answers = StudentAnswersEntity(key_name=student.user_id)
answers.updated_on = datetime.datetime.now()
utils.set_answer(answers, assessment_name, new_answers)
store_score(course, student, assessment_name, assessment_type,score)
student.put()
answers.put()
# Also record the event, which is useful for tracking multiple
# submissions and history.
models.EventEntity.record(
'submit-assessment', self.get_user(), transforms.dumps({
'type': 'assessment-%s' % assessment_name,
'values': new_answers, 'location': 'AnswerHandler'}))
return student
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
return
course = self.get_course()
assessment_type = self.request.get('assessment_type')
assessment_name = self.request.get('assessment_name')
if not assessment_type:
self.error(404)
logging.error('No assessment type supplied.')
return
unit = course.find_unit_by_id(assessment_type)
if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
self.error(404)
logging.error('No assessment named %s exists.', assessment_type)
return
self.te
| underlost/GamerNews | gamernews/apps/news/fields.py | Python | mit | 3,543 | 0.005927 |
import re
from django.db.models import fields
from django.template.defaultfilters import slugify
def _unique_slugify(instance, value, slug_field_name='slug', queryset=None, slug_separator='-'):
slug_field = instance._meta.get_field(slug_field_name)
slug_len = slug_field.max_length
# Sort out the initial slug. Chop its length down if we need to.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
# Create a queryset, excluding the current instance.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
# Find a unique slug. If one matches, add '-2' to the end and try again
# (then '-3', etc).
next = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '-%s' % next
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len-len(end)]
slug = _slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next += 1
setattr(instance, slug_field.attname, slug)
return slug
def _slug_strip(value, separator=None):
"""
Cleans up a slug by removing slug separator characters that occur at the
beginning or end of a slug.
If an alternate separator is used, it will also replace any instances of the
default '-' separator with the new separator.
"""
if separator == '-' or not separator:
re_sep = '-'
else:
re_sep = '(?:-|%s)' % re.escape(separator)
value = re.sub('%s+' % re_sep, separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
class AutoSlugField(fields.SlugField):
"""Auto slug field, creates unique slug for model."""
def __init__(self, prepopulate_from, *args, **kwargs):
"""Create auto slug field.
If field is unique, the uniqueness of the slug is ensured from existing
slugs by adding extra number at the end of slug.
If field has slug given, it is used instead. If you want to re-generate
the slug, just set it :const:`None` or :const:`""` so it will be re-
generated automatically.
:param prepopulate_from: Must be assigned to list of field names which
are used to prepopulate automatically.
:type prepopulate_from: sequence
"""
self.prepopulate_separator = kwargs.get("prepopulate_separator", u"-")
self.prepopulate_from = prepopulate_from
kwargs["blank"] = True
super(fields.SlugField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add): #@UnusedVariable
"""Pre-save event"""
current_slug = getattr(model_instance, self.attname)
# Use current slug instead, if it is given.
# Assumption: There are no empty slugs.
if not (current_slug is None or current_slug == ""):
slug = current_slug
else:
slug = self.prepopulate_separator.\
join(unicode(getattr(model_instance, prepop))
for prepop in self.prepopulate_from)
if self.unique:
return _unique_slugify(model_instance, value=slug,
slug_field_name=self.attname)
else:
return slugify(slug)[:self.max_length]
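A sketch of how the field above might be declared on a model, assuming a Python 2-era Django project; the app, model, and field names are hypothetical:

```python
# Illustrative only: the Article model and its fields are assumptions.
from django.db import models

class Article(models.Model):
    title = models.CharField(max_length=200)
    # Regenerated from `title` when no slug is given; uniqueness is enforced
    # by appending -2, -3, ... as needed.
    slug = AutoSlugField(prepopulate_from=("title",), max_length=50, unique=True)
```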
| unikmhz/npui | netprofile_bills/netprofile_bills/views.py | Python | agpl-3.0 | 3,005 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Bills module - Views
# Copyright © 2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
from pyramid.i18n import TranslationStringFactory
from netprofile.common.hooks import register_hook
from netprofile.db.connection import DBSession
from .models import Bill
_ = TranslationStringFactory('netprofile_bills')
@register_hook('core.dpanetabs.bills.BillType')
def _dpane_billtype_bills(tabs, model, req):
loc = req.localizer
if req.has_permission('BILLS_LIST'):
tabs.append({
'title': loc.translate(_('Bills')),
'iconCls': 'ico-mod-bill',
'xtype': 'grid_bills_Bill',
'stateId': None,
'stateful': False,
'hideColumns': ('type',),
'extraParamProp': 'btypeid',
'createControllers': 'NetProfile.core.controller.RelatedWizard'
})
@register_hook('core.dpanetabs.entities.Entity')
@register_hook('core.dpanetabs.entities.PhysicalEntity')
@register_hook('core.dpanetabs.entities.LegalEntity')
@register_hook('core.dpanetabs.entities.StructuralEntity')
@register_hook('core.dpanetabs.entities.ExternalEntity')
def _dpane_entity_bills(tabs, model, req):
loc = req.localizer
if req.has_permission('BILLS_LIST'):
tabs.append({
'title': loc.translate(_('Bills')),
'iconCls': 'ico-mod-bill',
'xtype': 'grid_bills_Bill',
'stateId': None,
'stateful': False,
'hideColumns': ('entity',),
'extraParamProp': 'entityid',
'createControllers': 'NetProfile.core.controller.RelatedWizard'
})
@register_hook('documents.gen.object')
def _doc_gen_obj(tpl_vars, objid, objtype, req):
if objtype != 'bill':
return
obj = DBSession().query(Bill).get(objid)
if not obj:
return
mr = req.matched_route
if mr and mr.name and mr.name.startswith('documents.generate'):
tpl_vars.update({'bill': obj})
else:
v = obj.template_vars(req)
if v:
tpl_vars.update({'bill': v})
|
david81brs/seaport
|
week6/l5_hipotenusas.py
|
Python
|
gpl-2.0
| 632
| 0.022152
|
#!/usr/bin/python3
def soma_hipotenusas(n):
chipotenusa=1
somahipot=[]
while chipotenusa <=n:
cat1 = 1
        while cat1 <= n:
cat2=1
cat1+=1
while cat2 <=n:
if (cat1**2 + cat2**2) == chipotenusa**2:
if chipotenusa not in somahipot:
somahipot.append(chipotenusa)
# print(cat1, cat2, chipotenusa, somahipot)
cat2+=1
chipotenusa+=1
acumulador=0
#print(somahipot)
    for i in range(0, len(somahipot)):
acumulador=acumulador+somahipot[i]
return acumulador
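# Hedged sanity check (not part of the original script): for n = 13 the
# Pythagorean hypotenuses reachable with integer legs are 5 (3-4-5),
# 10 (6-8-10) and 13 (5-12-13), so the expected sum is 28.
if __name__ == "__main__":
    print(soma_hipotenusas(5))   # -> 5
    print(soma_hipotenusas(13))  # -> 28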
|
fs714/drcontroller
|
drcontroller/recovery/base.py
|
Python
|
apache-2.0
| 2,725
| 0.008807
|
import logging
import json
from ansible.runner import Runner
from taskflow.patterns import linear_flow, unordered_flow
from taskflow.task import Task
shell_task = Runner(
host_list=['eselnlx1453'],
pattern= '*',
module_name = 'shell',
module_args='echo "Hello World"')
copy_task = Runner(
    host_list=['eselnlx1277'],
pattern='*',
module_name='copy',
module_args='src=/home/ejjacci/ansible/example.py dest=/home/ejjacci/tmp/example.py')
class AnsibleTask(Task):
def __init__(self, name, host_list, module_name, module_args, pattern='*', inject=None):
        super(AnsibleTask, self).__init__(name, inject=inject, provides=name)  # provides sends out the results to engine.storage
self.name = name
self.host_list = host_list
self.logger = logging.getLogger("RecoveryHandler:Base")
self.runner = Runner(
host_list = host_list,
pattern = pattern,
module_name = module_name,
module_args = module_args)
def add_result_handler(self, result_handler):
self.result_handler = result_handler
def execute(self):
self.logger.info('Executing Task ' + self.name + ':')
self.logger.info('\tHosts: ' + ','.join(self.host_list))
self.logger.info('\tModule_name: ' + self.runner.module_name)
self.logger.info('\tModule_args: ' + self.runner.module_args)
self.logger.info('\tPattern: ' + self.runner.pattern)
result = self.runner.run()
# fake_task = Runner(
# host_list=['10.175.150.16'],
# pattern= '*',
# module_name = 'shell',
# module_args='echo "Hello World"')
# result = fake_task.run()
self.logger.debug('Result of Task ' + self.name + ':')
self.logger.debug(json.dumps(result, indent=4, sort_keys=True))
self.result_handler.analyze(self.name, result)
return result
class ShellTask(AnsibleTask):
def __init__(self, name, host_list, module_args, pattern='*', inject=None):
super(ShellTask, self).__init__(name, host_list, 'shell', module_args, pattern, inject)
class FlowCreator(object):
def create_flow(self, name):
raise NotImplementedError()
def create(self, name, tasks):
ul_flow = self.create_flow(name)
for task in tasks:
ul_flow.add(task)
return ul_flow
class UnorderedFlowCreator(FlowCreator):
def create_flow(self, name):
return unordered_flow.Flow(name)
class LinearFlowCreator(FlowCreator):
def create_flow(self, name):
return linear_flow.Flow(name)
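# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the pieces above are meant to compose: wrap shell commands
# in ShellTask and chain them with LinearFlowCreator. Host names and commands
# are made up for illustration; the resulting flow would normally be handed to
# a taskflow engine.
#
# tasks = [
#     ShellTask('check_disk', ['node1.example.com'], 'df -h'),
#     ShellTask('restart_service', ['node1.example.com'], 'systemctl restart myservice'),
# ]
# recovery_flow = LinearFlowCreator().create('recovery', tasks)
# # e.g. taskflow.engines.run(recovery_flow)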
|
bastibl/gr-ieee802-15-4
|
python/css_analyze_tx_signal.py
|
Python
|
gpl-3.0
| 6,539
| 0.029209
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
# Local helper modules referenced below (assumed to live alongside this
# script in the gr-ieee802-15-4 python folder; names inferred from usage).
import css_constants
import css_mod
import css_demod
if __name__ == "__main__":
print("Generate and demodulate IEEE 802.15.4 compliant CSS baseband signal")
slow_rate = False
phy_packetsize_bytes = 38
nframes = 40
chirp_number = 2
m = css_mod.modulator(slow_rate=slow_rate, phy_packetsize_bytes=phy_packetsize_bytes, nframes=nframes, chirp_number=chirp_number)
[payload,baseband] = m.modulate_random()
    d = css_demod.demodulator(slow_rate=slow_rate, phy_packetsize_bytes=phy_packetsize_bytes, nframes=nframes, chirp_number=chirp_number)
payload_rx = d.demodulate(baseband)
print("RX BER:", sum(abs(payload - payload_rx))/len(payload))
print("samples in one...")
print("-> subchirp: ", css_constants.n_sub)
print("-> average chirp sequence:", css_constants.n_chirp)
nsamp_frame = len(baseband)/m.nframes
print("-> frame: ", nsamp_frame)
nsamp_payload = m.phy_packetsize_bytes*css_constants.n_chirp
nsamp_header = nsamp_frame - nsamp_payload
print("-> frame header: ", nsamp_header)
print("-> frame payload: ", nsamp_payload)
f, axarr = plt.subplots(2)
axarr[0].stem(np.angle(d.sym_DQPSK, deg=True))
axarr[0].set_title("Demodulated DQPSK symbols")
axarr[1].stem(np.angle(m.frame_DQPSK, deg=True) - np.angle(d.sym_DQPSK, deg=True))
axarr[1].set_title("Difference between original and demodulated DQPSK symbols")
print("sum of difference of angles in DQPSK symbols:", sum(np.angle(m.frame_DQPSK, deg=True) - np.angle(d.sym_DQPSK, deg=True)))
f, axarr = plt.subplots(2)
axarr[0].stem(np.angle(d.sym_QPSK, deg=True))
axarr[0].set_title("Demodulated QPSK symbols")
axarr[1].stem(np.angle(m.frame_QPSK, deg=True) - np.angle(d.sym_QPSK, deg=True))
axarr[1].set_title("Difference between original and demodulated QPSK symbols")
print("sum of difference of angles in DQPSK symbols:", sum(np.angle(m.frame_QPSK, deg=True) - np.angle(d.sym_QPSK, deg=True)))
f, axarr = plt.subplots(4)
for i in range(4):
axarr[i].plot(m.possible_chirp_sequences[i].real,label='real')
axarr[i].plot(m.possible_chirp_sequences[i].imag,label='imag')
axarr[i].legend()
f.suptitle("Real and imaginary part of the 4 chirp sequences windows with the raised cosine")
# plot PSD and frequency mask
s = abs(np.fft.fftshift(np.fft.fft(baseband)))**2
freq = np.linspace(-css_constants.bb_samp_rate/2, css_constants.bb_samp_rate/2-1/css_constants.bb_samp_rate, len(s))
mask = np.zeros(len(s))
for i in range(len(mask)):
if abs(freq[i]) > 22e6:
mask[i] = 1e-5
if abs(freq[i]) > 11e6:
mask[i] = 1e-3
if abs(freq[i]) <= 11e6:
mask[i] = 1
f, axarr = plt.subplots(3,1)
s_norm = s/max(s)
axarr[0].plot(freq, 10*np.log10(s_norm))
axarr[0].plot(freq, 10*np.log10(mask), 'r')
axarr[0].set_title("Complex baseband spectrum and frequency mask")
axarr[0].set_ylabel("|S| [dB]")
axarr[0].set_xlabel("Hz")
axarr[0].set_ylim([-50,0])
axarr[0].set_xlim([freq[0], freq[-1]])
# plot time signal magnitude
t = np.linspace(0,1,css_constants.bb_samp_rate+1)
t = t[:len(s)]
axarr[1].plot(abs(baseband[:len(t)]))
axarr[1].set_title("Complex baseband magnitude")
axarr[1].set_xlabel("n")
axarr[1].set_ylabel("|s(n)|")
axarr[1].set_xlim([0,nsamp_frame])
# plot real part of time signal
axarr[2].plot(baseband[:len(t)].real, label='real')
axarr[2].plot(baseband[:len(t)].imag, label='imag')
axarr[2].legend()
axarr[2].set_title("Real and imaginary part of time signal using chirp sequence #"+str(m.chirp_number))
axarr[2].set_xlim([0,nsamp_frame])
for i in range(len(t)/nsamp_frame):
axarr[2].axvline(x=nsamp_frame*i, linewidth=4, color='r')
# plot auto-/crosscorrelation of chirp sequences
ccf = []
for i in range(4):
for k in range(4):
tmp = abs(np.correlate(m.possible_chirp_sequences[i], m.possible_chirp_sequences[k], mode='same'))
ccf.append(tmp)
f, axarr = plt.subplots(4,4)
for i in range(4):
for k in range(4):
titlestring = "("+str(i+1)+","+str(k+1)+")"
axarr[i,k].plot(ccf[i*4+k], label=titlestring)
axarr[i,k].legend()
f.suptitle("Cross correlation of chirp sequence pairs (no time gaps)")
# plot correlation of chirp sequences and transmit signal with raised cosine filter
f, axarr = plt.subplots(6)
axarr[0].plot(m.rcfilt, label="RC filter")
axarr[0].legend()
axarr[0].set_ylim([0,1.2])
for i in range(1,5):
titlestring = str(i)
axarr[i].plot(abs(np.correlate(m.rcfilt, m.possible_chirp_sequences[i-1], mode='full')), label=titlestring)
axarr[i].legend()
titlestring = "tx w/ rc filter"
axarr[5].plot(abs(np.correlate(m.rcfilt, baseband[:len(t)], mode='full')), label=titlestring)
axarr[5].legend()
f.suptitle("Correlation of raised cosine filter with chirp sequences and transmit signal")
# plot correlation of chirp sequences with transmit signal
f, axarr = plt.subplots(4)
for i in range(4):
titlestring = "chirp seq #"+str(i+1)
axarr[i].plot(abs(np.correlate(baseband, m.possible_chirp_sequences[i], mode='full')), label=titlestring)
axarr[i].legend()
axarr[i].set_xlim([0,nsamp_frame*m.nframes])
for k in range(m.nframes):
axarr[i].axvline(x=nsamp_frame*k, linewidth=4, color='r')
f.suptitle("Correlation of chirp sequences with transmit signal carrying chirp seq #"+ str(m.chirp_number))
# plot correlation of subchirps with transmit signal
f, axarr = plt.subplots(4,2)
sc = []
for i in range(4):
sc.append(m.chirp_seq[i*css_constants.n_sub:(i+1)*css_constants.n_sub])
for i in range(4):
titlestring = "subchirp #" + str(i+1)
axarr[i,0].plot(abs(np.correlate(baseband, sc[i])),label=titlestring)
axarr[i,0].legend()
axarr[i,0].set_xlim([0,4*css_constants.n_chirp])
axarr[i,1].plot(sc[i].real,label='real')
axarr[i,1].plot(sc[i].imag,label='imag')
axarr[i,1].legend()
f.suptitle("Correlation of subchirps with transmit signal")
# plot correlation of subchirps with frequency shifted transmit signal
cfo = 50000 # Hz
baseband_foff = baseband[:len(t)]*np.exp(1j*2*np.pi*cfo*t)
f, axarr = plt.subplots(4)
for i in range(4):
titlestring = "subchirp #"+str(i)
axarr[i].plot(abs(np.correlate(baseband_foff, sc[i])),label=titlestring)
axarr[i].set_xlim([0,4*css_constants.n_chirp])
axarr[i].legend()
f.suptitle("Correlation of subchirps and transmit signal with "+str(cfo/1000)+" kHz CFO")
# plot correlator output magnitude and phase
# f, axarr = plt.subplots(2)
# axarr[0].plot(abs(correlator_out))
# axarr[0].set_title("Magnitude")
# axarr[1].stem(np.angle(correlator_out)/np.pi*180)
# axarr[1].set_title("Phase")
# f.suptitle("RX correlator output")
plt.show()
|
VladimirVystupkin/AMRParsing1.x
|
smatch_v1_0/smatch-table.py
|
Python
|
gpl-2.0
| 12,971
| 0.046334
|
#!/usr/bin/env python
import amr
import sys
import subprocess
import smatch_modified
import os
import random
import time
#import optparse
#import argparse #argparse only works for python 2.7. If you are using an older version of Python, you can use optparse instead.
#import locale
ERROR_LOG=sys.stderr
verbose=False
isi_dir_pre="/nfs/web/isi.edu/cgi-bin/div3/mt/save-amr"
"""
Get the annotator name list based on a list of files
Args:
file_dir: AMR file folder
files: a list of AMR names, e.g. nw_wsj_0001_1
Return:
a list of user names who annotate all the files
"""
def get_names(file_dir,files):
#for each user, check if they have files available
#return user name list
total_list=[]
name_list=[]
get_sub=False
    for path,subdir,dir_files in os.walk(file_dir):
# print path
if get_sub==False:
total_list=subdir[:]
get_sub=True
else:
break
for user in total_list:
#print user
has_file=True
for file in files:
# print file
file_path=file_dir+user+"/"+file+".txt"
# print file_path
if not os.path.exists(file_path):
has_file=False
break
if has_file==True:
name_list.append(user)
# print name_list
if len(name_list)==0:
        print >> ERROR_LOG,"********Error: Cannot find any user who completes the files*************"
return name_list
"""
Compute the smatch scores for a file list between two users
Args:
user1: user 1 name
user2: user 2 name
file_list: file list
dir_pre: the file location prefix
start_num: the number of restarts in smatch
Returns:
smatch f score.
"""
def compute_files(user1,user2,file_list,dir_pre,start_num):
#print file_list
#print user1, user2
match_total=0
test_total=0
gold_total=0
for fi in file_list:
file1=dir_pre+user1+"/"+fi+".txt"
file2=dir_pre+user2+"/"+fi+".txt"
#print file1,file2
if not os.path.exists(file1):
print >> ERROR_LOG,"*********Error: ", file1, "does not exist*********"
return -1.00
if not os.path.exists(file2):
print >> ERROR_LOG,"*********Error: ", file2, "does not exist*********"
return -1.00
try:
file1_h=open(file1,"r")
file2_h=open(file2,"r")
except:
print >> ERROR_LOG, "Cannot open the files", file1, file2
cur_amr1=smatch_modified.get_amr_line(file1_h)
cur_amr2=smatch_modified.get_amr_line(file2_h)
if(cur_amr1==""):
print >> ERROR_LOG, "AMR 1 is empty"
continue
if(cur_amr2==""):
print >> ERROR_LOG, "AMR 2 is empty"
continue
amr1=amr.AMR.parse_AMR_line(cur_amr1)
amr2=amr.AMR.parse_AMR_line(cur_amr2)
test_label="a"
gold_label="b"
amr1.rename_node(test_label)
amr2.rename_node(gold_label)
(test_inst,test_rel1,test_rel2)=amr1.get_triples2()
(gold_inst,gold_rel1,gold_rel2)=amr2.get_triples2()
if verbose:
print >> ERROR_LOG,"Instance triples of file 1:",len(test_inst)
print >> ERROR_LOG,test_inst
print >> sys.stderr,"Relation triples of file 1:",len(test_rel1)+len(test_rel2)
print >>sys.stderr,test_rel1
print >> sys.stderr,test_rel2
print >> ERROR_LOG,"Instance triples of file 2:",len(gold_inst)
print >> ERROR_LOG,gold_inst
print >> sys.stderr,"Relation triples of file 2:",len(gold_rel1)+len(gold_rel2)
print >> sys.stderr,gold_rel1
print >> sys.stderr,gold_rel2
if len(test_inst)<len(gold_inst):
(best_match,best_match_num)=smatch_modified.get_fh(test_inst,test_rel1,test_rel2,gold_inst,gold_rel1,gold_rel2,test_label,gold_label)
if verbose:
print >> ERROR_LOG, "best match number",best_match_num
print >>ERROR_LOG,"Best Match:",smatch_modified.print_alignment(best_match,test_inst,gold_inst)
else:
(best_match,best_match_num)=smatch_modified.get_fh(gold_inst,gold_rel1,gold_rel2,test_inst,test_rel1,test_rel2,gold_label,test_label)
if verbose:
print >> ERROR_LOG, "best match number",best_match_num
print >>ERROR_LOG,"Best Match:",smatch_modified.print_alignment(best_match,gold_inst,test_inst,True)
#(match_num,test_num,gold_num)=smatch.get_match(tmp_filename1,tmp_filename2,start_num)
#print match_num,test_num,gold_num
# print best_match_num
# print len(test_inst)+len(test_rel1)+len(test_rel2)
# print len(gold_inst)+len(gold_rel1)+len(gold_rel2)
match_total+=best_match_num
test_total+=len(test_inst)+len(test_rel1)+len(test_rel2)
gold_total+=len(gold_inst)+len(gold_rel1)+len(gold_rel2)
smatch_modified.match_num_dict.clear()
(precision,recall,f_score)=smatch_modified.compute_f(match_total,test_total,gold_total)
return "%.2f" % f_score
def get_max_width(table,index):
return max([len(str(row[index])) for row in table])
"""
Print a table
"""
def pprint_table(table):
col_paddings=[]
for i in range(len(table[0])):
col_paddings.append(get_max_width(table,i))
for row in table:
print row[0].ljust(col_paddings[0]+1),
for i in range(1,len(row)):
col = str(row[i]).rjust(col_paddings[i]+2)
print col,
print "\n"
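# Hedged illustration (not in the original): pprint_table expects a list of
# rows where the first column is left-justified and the remaining columns are
# right-justified, e.g. (made-up values):
#
#   table = [["users", "smatch_f"],
#            ["ann1-ann2", "0.83"],
#            ["ann1-ann3", "0.79"]]
#   pprint_table(table)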
def print_help():
print "Smatch Calculator Program Help"
print "This program prints the smatch score of the two files"
print "Command line arguments:"
print "-h: Show help (Other options won't work if you use -h)"
print "smatch-table.py -h"
print "Usage: smatch-table.py file_list (-f list_file) [ -p user_list ] [-r number of starts]"
print "File list is AMR file ids separated by a blank space"
print "Example: smatch-table.py nw_wsj_0001_1 nw_wsj_0001_2"
print "Or use -f list_file to indicate a file which contains one line of file names, separated by a blank space"
print "Example: smatch.py -f file"
print "-p: (Optional) user list to list the user name in the command line, after the file list. Otherwise the program automatically searches for the users who completes all AMRs you want."
print "Example: smatch.py -f file -p user1 user2"
print "Example: smatch.py nw_wsj_0001_1 nw_wsj_0001_2 -p user1 user2"
print "-r: (Optional) the number of random starts(higher number may results in higher accuracy and slower speed (default number of starts: 10)"
print "Example: smatch.py -f file -p user1 user2 -r 20"
# print "-d: detailed output, including alignment and triples of the two files"
# print "Example (if you want to use all options): smatch.py file1 file2 -d -r 20"
print "Contact shucai@isi.edu for additional help"
def build_arg_parser():
"""Build an argument parser using argparse"""
parser=argparse.ArgumentParser(description="Smatch table calculator -- arguments")
parser.add_argument("--fl",type=argparse.FileType('r'),help='AMR ID list file')
parser.add_argument('-f',nargs='+',help='AMR IDs (at least one)')
parser.add_argument("-p",nargs='*',help="User list (can be none)")
parser.add_argument("--fd",default=isi_dir_pre,help="AMR File directory. Default=location on isi machine")
#parser.add_argument("--cd",default=os.getcwd(),help="(Dependent) code directory. Default: current directory")
parser.add_argument('-r',type=int,default=4,help='Restart number (Default:4)')
parser.add_argument('-v',action='store_true',help='Verbose output (Default:False)')
return parser
"""
Callback function to handle variable number of arguments in optparse
"""
def cb(option, opt_str, value, parser):
args=[]
args.append(value)
for arg in parser.rargs:
if arg[0] != "-":
args.append(arg)
else:
del parser.rargs[:len(args)]
break
if getattr(parser.values, option.dest):
args.extend(getattr(parser.values, option.dest))
setattr(parser.values, option.dest, args)
def build_arg_parser2():
"""Build an argument parser using optparse"""
usage_str="Smatch table calculator -- arguments"
parser=optparse.OptionParser(usage=usage_str)
parser.add_option("--fl",dest="fl",type="string",help='AMR ID list file')
parser.add_option("-f",dest="f",type="string",action="callback",callback=cb,help="AMR IDs (at
|
DONIKAN/django
|
tests/postgres_tests/array_default_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 654
| 0.003058
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
    operations = [
migrations.CreateModel(
name='IntegerArrayDefaultModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field', django.contrib.postgres.fields.ArrayField(models.IntegerField(), size=None)),
],
options={
},
bases=(models.Model,),
),
]
|
deathglitch/metarigging
|
python/metaui/__init__.py
|
Python
|
mit
| 1,252
| 0.009585
|
import os
import sys
import inspect
_module = sys.modules[__name__]
_safe_import_list = [_x.__name__ for _x in (_module, os, sys, inspect)]
for mod in [m for m in sys.modules.keys() if m != __name__ and sys.modules[m] != None and m.startswith(__name__) and len(m.split(__name__ + ".")[-1].split(".")) == 1 and (not sys.modules[m].__file__.split(os.path.sep)[-1].startswith("__init__"))]:
del(sys.modules[mod])
from example_ui import *
#deleting classes, function, modules not in module
for _function_name in [_member[0] for _member in inspect.getmembers(_module, inspect.isfunction) if not _member[1].__module__.startswith(__name__)]:
delattr(_module, _function_name)
for _class_name in [_member[0] for _member in inspect.getmembers(_module, inspect.isclass) if not _member[1].__module__.startswith(__name__)]:
delattr(_module, _class_name)
for _module_info in [_member for _member in inspect.getmembers(_module, inspect.ismodule) if _member[1].__name__ not in _safe_import_list]:
if not hasattr(_module_info[1], "__file__"):
delattr(_module, _module_info[0])
continue
if not _module_info[1].__file__.lower().startswith(__file__.rpartition("\\")[0].lower()):
delattr(_module, _module_info[0])
|
arnedesmedt/dotfiles
|
.config/sublime-text-3/Packages.symlinkfollow/mdpopups/st3/mdpopups/st_color_scheme_matcher.py
|
Python
|
mit
| 23,380
| 0.003208
|
"""
color_scheme_matcher.
Licensed under MIT.
Copyright (C) 2012 Andrew Gibson <agibsonsw@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---------------------
Original code has been heavily modified by Isaac Muse <isaacmuse@gmail.com> for the ExportHtml project.
Algorithm has been split out into a separate library and been enhanced with a number of features.
"""
from __future__ import absolute_import
import sublime
import re
from .rgba import RGBA, clamp, round_int
from . import x11colors
from os import path
from collections import namedtuple
from plistlib import readPlistFromBytes
import decimal
FONT_STYLE = "font_style" if int(sublime.version()) >= 3151 else "fontStyle"
# For new Sublime format
FLOAT_TRIM_RE = re.compile(r'^(?P<keep>\d+)(?P<trash>\.0+|(?P<keep2>\.\d*[1-9])0+)$')
COLOR_PARTS = {
"percent": r"[+\-]?(?:(?:\d*\.\d+)|\d+)%",
"float": r"[+\-]?(?:(?:\d*\.\d+)|\d+)"
}
RGB_COLORS = r"""(?x)
(?P<hexa>\#(?P<hexa_content>[\dA-Fa-f]{8}))\b |
(?P<hex>\#(?P<hex_content>[\dA-Fa-f]{6}))\b |
(?P<hexa_compressed>\#(?P<hexa_compressed_content>[\dA-Fa-f]{4}))\b |
(?P<hex_compressed>\#(?P<hex_compressed_content>[\dA-Fa-f]{3}))\b |
\b(?P<rgb>rgb\(\s*(?P<rgb_content>(?:%(float)s\s*,\s*){2}%(float)s | (?:%(percent)s\s*,\s*){2}%(percent)s)\s*\)) |
\b(?P<rgba>rgba\(\s*(?P<rgba_content>
(?:%(float)s\s*,\s*){3}(?:%(percent)s|%(float)s) | (?:%(percent)s\s*,\s*){3}(?:%(percent)s|%(float)s)
)\s*\))
""" % COLOR_PARTS
HSL_COLORS = r"""(?x)
\b(?P<hsl>hsl\(\s*(?P<hsl_content>%(float)s\s*,\s*%(percent)s\s*,\s*%(percent)s)\s*\)) |
\b(?P<hsla>hsla\(\s*(?P<hsla_content>%(float)s\s*,\s*(?:%(percent)s\s*,\s*){2}(?:%(percent)s|%(float)s))\s*\))
""" % COLOR_PARTS
VARIABLES = r"""(?x)
\b(?P<var>var\(\s*(?P<var_content>\w[\w\d]*)\s*\))
"""
COLOR_MOD = r"""(?x)
\b(?P<color>color\((?P<color_content>.*)\))
"""
COLOR_NAMES = r'\b(?P<x11colors>%s)\b(?!\()' % '|'.join([name for name in x11colors.name2hex_map.keys()])
COLOR_RE = re.compile(
r'(?x)(?i)(?:%s|%s|%s|%s|%s)' % (
RGB_COLORS,
HSL_COLORS,
VARIABLES,
COLOR_MOD,
COLOR_NAMES
)
)
COLOR_RGB_SPACE_RE = re.compile(
r'(?x)(?i)(?:%s|%s|%s)' % (
RGB_COLORS,
VARIABLES,
COLOR_NAMES
)
)
COLOR_MOD_RE = re.compile(
r'''(?x)
color\(\s*
(?P<base>\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})
\s+(?P<type>blenda?)\(
(?P<color>\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})
\s+(?P<percent>%(percent)s)
\)
(?P<other>
(?:\s+blenda?\((?:\#[\dA-Fa-f]{8}|\#[\dA-Fa-f]{6})\s+%(percent)s\))+
)?
\s*\)
''' % COLOR_PARTS
)
def fmt_float(f, p=0):
"""Set float precision a
|
nd trim precision zeros."""
string = str(
decimal.Decimal(f).quantize(decimal.Decimal('0.' + ('0' * p)
|
if p > 0 else '0'), decimal.ROUND_HALF_UP)
)
m = FLOAT_TRIM_RE.match(string)
if m:
string = m.group('keep')
if m.group('keep2'):
string += m.group('keep2')
return string
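# Hedged illustration (not part of the original module): fmt_float rounds
# half-up to the requested precision and strips trailing zeros, e.g.
#   fmt_float(1.25, 2)  -> "1.25"
#   fmt_float(1.2, 3)   -> "1.2"   (trailing "00" padding is trimmed)
#   fmt_float(2.0, 2)   -> "2"     (".00" is trimmed entirely)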
def alpha_dec_normalize(dec):
"""Normailze a deciaml alpha value."""
temp = float(dec)
if temp < 0.0 or temp > 1.0:
dec = fmt_float(clamp(float(temp), 0.0, 1.0), 3)
alpha = "%02x" % round_int(float(dec) * 255.0)
return alpha
def alpha_percent_normalize(perc):
"""Normailze a percent alpha value."""
alpha_float = clamp(float(perc.strip('%')), 0.0, 100.0) / 100.0
alpha = "%02x" % round_int(alpha_float * 255.0)
return alpha
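# Hedged illustration (not part of the original module): both helpers return a
# two-digit hex byte for the alpha channel, e.g.
#   alpha_dec_normalize("1.0")      -> "ff"
#   alpha_percent_normalize("100%") -> "ff"
#   alpha_percent_normalize("0%")   -> "00"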
def blend(m):
"""Blend colors."""
base = m.group('base')
color = m.group('color')
blend_type = m.group('type')
percent = m.group('percent')
if percent.endswith('%'):
percent = float(percent.strip('%'))
else:
percent = int(alpha_dec_normalize(percent), 16) * (100.0 / 255.0)
rgba = RGBA(base)
rgba.blend(color, percent, alpha=(blend_type == 'blenda'))
color = rgba.get_rgb() if rgba.a == 255 else rgba.get_rgba()
if m.group('other'):
color = "color(%s %s)" % (color, m.group('other'))
return color
def translate_color(m, var, var_src):
"""Translate the match object to a color w/ alpha."""
color = None
alpha = None
groups = m.groupdict()
if groups.get('hex_compressed'):
content = m.group('hex_compressed_content')
color = "#%02x%02x%02x" % (
int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
)
elif groups.get('hexa_compressed'):
content = m.group('hexa_compressed_content')
color = "#%02x%02x%02x" % (
int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
)
alpha = content[3:]
elif groups.get('hex'):
content = m.group('hex_content')
if len(content) == 6:
color = "#%02x%02x%02x" % (
int(content[0:2], 16), int(content[2:4], 16), int(content[4:6], 16)
)
else:
color = "#%02x%02x%02x" % (
int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
)
elif groups.get('hexa'):
content = m.group('hexa_content')
if len(content) == 8:
color = "#%02x%02x%02x" % (
int(content[0:2], 16), int(content[2:4], 16), int(content[4:6], 16)
)
alpha = content[6:]
else:
color = "#%02x%02x%02x" % (
int(content[0:1] * 2, 16), int(content[1:2] * 2, 16), int(content[2:3] * 2, 16)
)
alpha = content[3:]
elif groups.get('rgb'):
content = [x.strip() for x in m.group('rgb_content').split(',')]
if content[0].endswith('%'):
r = round_int(clamp(float(content[0].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
g = round_int(clamp(float(content[1].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
b = round_int(clamp(float(content[2].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
color = "#%02x%02x%02x" % (r, g, b)
else:
color = "#%02x%02x%02x" % (
clamp(round_int(float(content[0])), 0, 255),
clamp(round_int(float(content[1])), 0, 255),
clamp(round_int(float(content[2])), 0, 255)
)
elif groups.get('rgba'):
content = [x.strip() for x in m.group('rgba_content').split(',')]
if content[0].endswith('%'):
r = round_int(clamp(float(content[0].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
g = round_int(clamp(float(content[1].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
b = round_int(clamp(float(content[2].strip('%')), 0.0, 255.0) * (255.0 / 100.0))
color = "#%02x%02x%02x" % (r, g, b)
else:
color = "#%02x%02x%02x" % (
clamp(round_int(float(content[0])), 0, 255),
clamp(round_int(float(content[1])), 0, 255),
clamp(round_int(float(content[2])), 0, 255)
)
if content[3].endswith('%'):
alpha = alpha_percent_normalize(content[3])
else:
            alpha = alpha_dec_normalize(content[3])
|
liqd/a4-meinberlin
|
tests/topicprio/dashboard_components/test_views_module_topics.py
|
Python
|
agpl-3.0
| 3,141
| 0
|
import pytest
from django.urls import reverse
from adhocracy4.dashboard import components
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.topicprio.models import Topic
from meinberlin.apps.topicprio.phases import PrioritizePhase
component = components.modules.get('topic_edit')
@pytest.mark.django_db
def test_edit_view(client, phase_factory, topic_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
url = component.get_base_url(module)
client.login(username=initiator.email, password='password')
response = client.get(url)
assert_template_response(response,
'meinberlin_topicprio/topic_dashboard_list.html')
@pytest.mark.django_db
def test_topic_create_view(client, phase_factory, category_factory):
phase, module, project, item = setup_phase(
phase_factory, None, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
category = category_factory(module=module)
url = reverse('a4dashboard:topic-create',
kwargs={'module_slug': module.slug})
data = {
'name': 'test',
'description': 'test',
'category': category.pk
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'topic-list'
topic = Topic.objects.get(name=data.get('name'))
assert topic.description == data.get('description')
assert topic.category.pk == data.get('category')
@pytest.mark.django_db
def test_topic_update_view(
client, phase_factory, topic_factory, category_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
category = category_factory(module=module)
url = reverse('a4dashboard:topic-update',
kwargs={'pk': item.pk, 'year': item.created.year})
data = {
'name': 'test',
'description': 'test',
'category': category.pk
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'topic-list'
item.refresh_from_db()
assert item.description == data.get('description')
assert item.category.pk == data.get('category')
@pytest.mark.django_db
def test_topic_delete_view(client, phase_factory, topic_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
url = reverse('a4dashboard:topic-delete',
kwargs={'pk': item.pk, 'year': item.created.year})
client.login(username=initiator.email, password='password')
response = client.delete(url)
assert redirect_target(response) == 'topic-list'
assert not Topic.objects.exists()
|
Captain-Coder/tribler
|
Tribler/Test/Core/test_sqlitecachedbhandler_torrents.py
|
Python
|
lgpl-3.0
| 13,903
| 0.002374
|
import os
import struct
from binascii import unhexlify
from shutil import copy as copyfile
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.CacheDB.SqliteCacheDBHandler import TorrentDBHandler, MyPreferenceDBHandler, ChannelCastDBHandler
from Tribler.Core.CacheDB.sqlitecachedb import str2bin
from Tribler.Core.Category.Category import Category
from Tribler.Core.TorrentDef import TorrentDef
from Tribler.Core.leveldbstore import LevelDbStore
from Tribler.Test.Core.test_sqlitecachedbhandler import AbstractDB
from Tribler.Test.common import TESTS_DATA_DIR
S_TORRENT_PATH_BACKUP = os.path.join(TESTS_DATA_DIR, 'bak_single.torrent')
M_TORRENT_PATH_BACKUP = os.path.join(TESTS_DATA_DIR, 'bak_multiple.torrent')
class TestTorrentFullSessionDBHandler(AbstractDB):
def setUpPreSession(self):
super(TestTorrentFullSessionDBHandler, self).setUpPreSession()
self.config.set_megacache_enabled(True)
@inlineCallbacks
def setUp(self):
yield super(TestTorrentFullSessionDBHandler, self).setUp()
self.tdb = TorrentDBHandler(self.session)
def test_initialize(self):
self.tdb.initialize()
self.assertIsNone(self.tdb.mypref_db)
self.assertIsNone(self.tdb.votecast_db)
self.assertIsNone(self.tdb.channelcast_db)
class TestTorrentDBHandler(AbstractDB):
def addTorrent(self):
        old_size = self.tdb.size()
old_tracker_size = self.tdb._db.size('TrackerInfo')
s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
single_torrent_file_path = os.path.join(self.getStateDir(), 'single.torrent')
multiple_torrent_file_path = os.path.join(self.getStateDir(), 'multiple.torrent')
copyfile(S_TORRENT_PATH_BACKUP, single_torrent_file_path)
        copyfile(M_TORRENT_PATH_BACKUP, multiple_torrent_file_path)
single_tdef = TorrentDef.load(single_torrent_file_path)
self.assertEqual(s_infohash, single_tdef.get_infohash())
multiple_tdef = TorrentDef.load(multiple_torrent_file_path)
self.assertEqual(m_infohash, multiple_tdef.get_infohash())
self.tdb.addExternalTorrent(single_tdef)
self.tdb.addExternalTorrent(multiple_tdef)
single_torrent_id = self.tdb.getTorrentID(s_infohash)
multiple_torrent_id = self.tdb.getTorrentID(m_infohash)
self.assertEqual(self.tdb.getInfohash(single_torrent_id), s_infohash)
single_name = 'Tribler_4.1.7_src.zip'
multiple_name = 'Tribler_4.1.7_src'
self.assertEqual(self.tdb.size(), old_size + 2)
new_tracker_table_size = self.tdb._db.size('TrackerInfo')
self.assertLess(old_tracker_size, new_tracker_table_size)
sname = self.tdb.getOne('name', torrent_id=single_torrent_id)
self.assertEqual(sname, single_name)
mname = self.tdb.getOne('name', torrent_id=multiple_torrent_id)
self.assertEqual(mname, multiple_name)
s_size = self.tdb.getOne('length', torrent_id=single_torrent_id)
self.assertEqual(s_size, 1583233)
m_size = self.tdb.getOne('length', torrent_id=multiple_torrent_id)
self.assertEqual(m_size, 5358560)
cat = self.tdb.getOne('category', torrent_id=multiple_torrent_id)
self.assertEqual(cat, u'xxx')
s_status = self.tdb.getOne('status', torrent_id=single_torrent_id)
self.assertEqual(s_status, u'unknown')
m_comment = self.tdb.getOne('comment', torrent_id=multiple_torrent_id)
comments = 'www.tribler.org'
self.assertGreater(m_comment.find(comments), -1)
comments = 'something not inside'
self.assertEqual(m_comment.find(comments), -1)
m_trackers = self.tdb.getTrackerListByInfohash(m_infohash)
self.assertEqual(len(m_trackers), 8)
self.assertIn('http://tpb.tracker.thepiratebay.org/announce', m_trackers)
s_torrent = self.tdb.getTorrent(s_infohash)
m_torrent = self.tdb.getTorrent(m_infohash)
self.assertEqual(s_torrent['name'], 'Tribler_4.1.7_src.zip')
self.assertEqual(m_torrent['name'], 'Tribler_4.1.7_src')
self.assertEqual(m_torrent['last_tracker_check'], 0)
def updateTorrent(self):
m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
self.tdb.updateTorrent(m_infohash, relevance=3.1415926, category=u'Videoclips',
status=u'good', seeder=123, leecher=321,
last_tracker_check=1234567,
other_key1='abcd', other_key2=123)
multiple_torrent_id = self.tdb.getTorrentID(m_infohash)
category = self.tdb.getOne('category', torrent_id=multiple_torrent_id)
self.assertEqual(category, u'Videoclips')
status = self.tdb.getOne('status', torrent_id=multiple_torrent_id)
self.assertEqual(status, u'good')
seeder = self.tdb.getOne('num_seeders', torrent_id=multiple_torrent_id)
self.assertEqual(seeder, 123)
leecher = self.tdb.getOne('num_leechers', torrent_id=multiple_torrent_id)
self.assertEqual(leecher, 321)
last_tracker_check = self.tdb.getOne('last_tracker_check', torrent_id=multiple_torrent_id)
self.assertEqual(last_tracker_check, 1234567)
def setUpPreSession(self):
super(TestTorrentDBHandler, self).setUpPreSession()
self.config.set_megacache_enabled(True)
self.config.set_torrent_store_enabled(True)
@inlineCallbacks
def setUp(self):
yield super(TestTorrentDBHandler, self).setUp()
from Tribler.Core.APIImplementation.LaunchManyCore import TriblerLaunchMany
from Tribler.Core.Modules.tracker_manager import TrackerManager
self.session.lm = TriblerLaunchMany()
self.session.lm.tracker_manager = TrackerManager(self.session)
self.tdb = TorrentDBHandler(self.session)
self.tdb.torrent_dir = TESTS_DATA_DIR
self.tdb.category = Category()
self.tdb.mypref_db = MyPreferenceDBHandler(self.session)
@inlineCallbacks
def tearDown(self):
self.tdb.mypref_db.close()
self.tdb.mypref_db = None
self.tdb.close()
self.tdb = None
yield super(TestTorrentDBHandler, self).tearDown()
def test_hasTorrent(self):
infohash_str = 'AA8cTG7ZuPsyblbRE7CyxsrKUCg='
infohash = str2bin(infohash_str)
self.assertTrue(self.tdb.hasTorrent(infohash))
self.assertTrue(self.tdb.hasTorrent(infohash)) # cache will trigger
fake_infohash = 'fake_infohash_100000'
self.assertFalse(self.tdb.hasTorrent(fake_infohash))
def test_get_infohash(self):
self.assertTrue(self.tdb.getInfohash(1))
self.assertFalse(self.tdb.getInfohash(1234567))
def test_add_update_torrent(self):
self.addTorrent()
self.updateTorrent()
def test_update_torrent_from_metainfo(self):
# Add torrent first
infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
# Only infohash is added to the database
self.tdb.addOrGetTorrentID(infohash)
# Then update the torrent with metainfo
metainfo = {'info': {'files': [{'path': ['Something.something.pdf'], 'length': 123456789},
{'path': ['Another-thing.jpg'], 'length': 100000000}],
'piece length': 2097152,
'name': '\xc3Something awesome (2015)',
'pieces': ''},
'seeders': 0, 'initial peers': [],
'leechers': 36, 'download_exists': False, 'nodes': []}
self.tdb.update_torrent_with_metainfo(infohash, metainfo)
# Check updates are correct
torrent_id = self.tdb.getTorrentID(infohash)
name = self.tdb.getOne('name', torrent_id=torrent_id)
self.assertEqual(name, u'\xc3Something awesome (2015)')
num_files = self.tdb.getOne('num_files', torrent_id=torrent_id)
self.assertEqual(num_files, 2)
length = self.tdb.getOne('length', torrent_id=torrent_id)
|
geolovic/TProfiler
|
Tools/debug/Chi_Map_QGIS_testing_suite.py
|
Python
|
gpl-3.0
| 6,907
| 0.000869
|
# -*- coding: iso-8859-15 -*-
#
# Testing Suite for >> Get_Channels.py [QGIS Version]
#
# Copyright (C) 2017 J. Vicente Perez, Universidad de Granada
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from Chi_Map_QGIS import main
import time
import os
def test_00():
"""
Test 00 for Chi_Map [QGIS Version]
    Runs Chi_Map without any optional arguments
"""
inicio = time.time()
print("=" * 40)
print("Test 00 para Chi_Map [QGIS Version]")
print("Executes Chi_Map without optional arguments")
print("No threshold, No heads, No basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test00.shp"
main(dem, fac, out_shp)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("No se debe generar shapefile al no haber cabeceras")
print("=" * 40)
def test_01():
"""
Test 01 for Chi_Map [QGIS Version]
    Runs Chi_Map using only a threshold in cells
"""
inicio = time.time()
print("=" * 40)
print("Test 01 para Get_Channels [QGIS Version]")
print("Executes Chi_Map for a threshold of 1000 CELL")
print("No heads, No basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test01.shp"
threshold = 1000
units = "CELL"
main(dem, fac, out_shp, threshold=threshold, units=units)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_shp)
print("=" * 40)
def test_02():
"""
Test 02 for Chi_Map [QGIS Version]
    Runs Chi_Map using only a threshold in map units
"""
inicio = time.time()
print("=" * 40)
print("Test 02 para Chi_Map [QGIS Version]")
print("Executes Get_Channels for a threshold of 625000 m^2")
print("Stores profiles in an output '.dat' file")
print("No heads, No basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test02.shp"
threshold = 625000
units = "MAP"
main(dem, fac, out_shp, out_file=True, threshold=threshold, units=units)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_shp)
print("Generado tambien" + os.path.splitext(out_shp)[0] + ".dat")
print("=" * 40)
def test_03():
"""
Test 03 for Chi_Map [QGIS Version]
    Runs Chi_Map using only the main heads
"""
inicio = time.time()
print("=" * 40)
print("Test 03 para Chi_Map [QGIS Version]")
print("Executes Chi_Map only for selected heads")
print("No Threshold, No basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test03.shp"
head_shp = "../data/in/main_heads.shp"
id_field = "river_id"
main(dem, fac, out_shp, head_shp=head_shp, id_field=id_field)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_shp)
print("=" * 40)
def test_04():
"""
Test 04 for Chi_Map [QGIS Version]
    Runs Chi_Map using heads and a threshold
"""
inicio = time.time()
print("=" * 40)
print("Test 04 para Chi_Map [QGIS Version]")
print("Executes Chi_Map for selected heads and a threshold")
print("No basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test04.shp"
head_shp = "../data/in/main_heads.shp"
id_field = "river_id"
main(dem, fac, out_shp, threshold=1000, units="CELL", head_shp=head_shp, id_field=id_field)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_shp)
print("=" * 40)
def test_05():
"""
Test 05 for Chi_Map [QGIS Version]
    Runs Chi_Map using heads, a threshold and a basin layer
"""
inicio = time.time()
print("=" * 40)
print("Test 05 para Chi_Map [QGIS Version]")
print("Executes Chi_Map for selected heads, using a threshold, and inside basins")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test05.shp"
head_shp = "../data/in/main_heads.shp"
id_field = "river_id"
basin_shp = "../data/in/cuencas.shp"
main(dem, fac, out_shp, threshold=1000, units="CELL", basin_shp=basin_shp, head_shp=head_shp, id_field=id_field)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " + out_shp)
print("=" * 40)
def test_06():
"""
Test 06 for Chi_Map [QGIS Version]
    Runs Chi_Map using heads, a threshold and a basin layer
    Also stores the profiles in a .dat file
"""
inicio = time.time()
print("=" * 40)
print("Test 06 para Chi_Map [QGIS Version]")
print("Executes Chi_Map using a threshold inside basins, and using some parameters")
print("Stores profiles in an output '.dat' file")
print("Test in progress...")
# Test parameters
# ===============
dem = "../data/in/darro25.tif"
fac = "../data/in/darro25fac.tif"
out_shp = "../data/out/ChiMap_QGIS_test06.shp"
head_shp = "../data/in/main_heads.shp"
id_field = "river_id"
basin_shp = "../data/in/cuencas.shp"
distance = 250
thetaref = 0.4
main(dem, fac, out_shp, out_file=True, threshold=1000, units="CELL", basin_shp=basin_shp, head_shp=head_shp, id_field=id_field,
         distance=distance, thetaref=thetaref)
fin = time.time()
print("Test finalizado en " + str(fin - inicio) + " segundos")
print("Resultado en " +
|
out_shp)
print("Generado tambien" + os.path.splitext(out_shp)[0] + ".dat")
print("=" * 40)
test_00()
test_01()
test_02()
test_03()
test_04()
test_05()
test_06()
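# Hedged suggestion (not in the original file): wrapping the calls above in a
# __main__ guard would keep the suite importable without immediately running
# every test, e.g.
#
# if __name__ == "__main__":
#     for test in (test_00, test_01, test_02, test_03, test_04, test_05, test_06):
#         test()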
|
pauldardeau/cloud-jukebox
|
jukebox_db.py
|
Python
|
bsd-3-clause
| 18,366
| 0.000436
|
import sqlite3
import jukebox
from song_metadata import SongMetadata
from file_metadata import FileMetadata
class JukeboxDB:
def __init__(self, metadata_db_file_path=None, debug_print=False):
self.debug_print = debug_print
self.db_connection = None
if metadata_db_file_path is not None and len(metadata_db_file_path) > 0:
self.metadata_db_file_path = metadata_db_file_path
else:
self.metadata_db_file_path = 'jukebox_db.sqlite3'
def is_open(self):
return self.db_connection is not None
def open(self):
self.close()
open_success = False
self.db_connection = sqlite3.connect(self.metadata_db_file_path)
if self.db_connection is not None:
if not self.have_tables():
open_success = self.create_tables()
if not open_success:
print('error: unable to create all tables')
else:
open_success = True
return open_success
def close(self):
did_close = False
if self.db_connection is not None:
self.db_connection.close()
self.db_connection = None
did_close = True
return did_close
def __enter__(self):
# look for stored metadata in the storage system
self.db_connection = sqlite3.connect(self.metadata_db_file_path)
if self.db_connection is not None:
if self.debug_print:
print("have db connection")
else:
print("unable to connect to database")
return self
def __exit__(self, exception_type, exception_value, traceback):
if self.db_connection is not None:
self.db_connection.close()
self.db_connection = None
def create_table(self, sql):
try:
table_created = self.db_connection.execute(sql)
if not table_created:
print('creation of table failed')
print('%s' % sql)
return table_created
except sqlite3.Error as e:
print('error creating table: ' + e.args[0])
return False
def create_tables(self):
if self.db_connection is not None:
if self.debug_print:
print("creating tables")
create_genre_table = "CREATE TABLE genre (" + \
"genre_uid TEXT UNIQUE NOT NULL," + \
"genre_name TEXT UNIQUE NOT NULL," + \
"genre_description TEXT)"
create_artist_table = "CREATE TABLE artist (" + \
"artist_uid TEXT UNIQUE NOT NULL," + \
"artist_name TEXT UNIQUE NOT NULL," + \
"artist_description TEXT)"
            create_album_table = "CREATE TABLE album (" + \
                                 "album_uid TEXT UNIQUE NOT NULL," + \
                                 "album_name TEXT UNIQUE NOT NULL," + \
                                 "album_description TEXT," + \
"artist_uid TEXT NOT NULL REFERENCES artist(artist_uid)," + \
"genre_uid TEXT REFERENCES genre(genre_uid))"
create_song_table = "CREATE TABLE song (" + \
"song_uid TEXT UNIQUE NOT NULL," + \
"file_time TEXT," + \
"origin_file_size INTEGER," + \
"stored_file_size INTEGER," + \
"pad_char_count INTEGER," + \
"artist_name TEXT," + \
"artist_uid TEXT REFERENCES artist(artist_uid)," + \
"song_name TEXT NOT NULL," + \
"md5_hash TEXT NOT NULL," + \
"compressed INTEGER," + \
"encrypted INTEGER," + \
"container_name TEXT NOT NULL," + \
"object_name TEXT NOT NULL," + \
"album_uid TEXT REFERENCES album(album_uid))"
create_playlist_table = "CREATE TABLE playlist (" + \
"playlist_uid TEXT UNIQUE NOT NULL," + \
"playlist_name TEXT UNIQUE NOT NULL," + \
"playlist_description TEXT)"
create_playlist_song_table = "CREATE TABLE playlist_song (" + \
"playlist_song_uid TEXT UNIQUE NOT NULL," + \
"playlist_uid TEXT NOT NULL REFERENCES playlist(playlist_uid)," + \
"song_uid TEXT NOT NULL REFERENCES song(song_uid))"
try:
return self.create_table(create_genre_table) and \
self.create_table(create_artist_table) and \
self.create_table(create_album_table) and \
self.create_table(create_song_table) and \
self.create_table(create_playlist_table) and \
self.create_table(create_playlist_song_table)
except sqlite3.Error as e:
print('error creating table: ' + e.args[0])
return False
def have_tables(self):
have_tables_in_db = False
if self.db_connection is not None:
sql = "SELECT name " + \
"FROM sqlite_master " + \
"WHERE type='table' AND name='song'"
cursor = self.db_connection.cursor()
cursor.execute(sql)
name = cursor.fetchone()
if name is not None:
have_tables_in_db = True
return have_tables_in_db
def id_for_artist(self, artist_name):
pass
def id_for_album(self, artist_name, album_name):
pass
def insert_artist(self, artist_name):
pass
def insert_album(self, album_name, artist_id):
pass
def albums_for_artist(self, artist_id):
pass
def get_artists(self):
pass
def songs_for_album(self, album_id):
pass
def songs_for_artist(self, artist_id):
pass
def get_playlists(self):
pass
def get_playlist(self, playlist_name):
pl_object = None
if playlist_name is not None and len(playlist_name) > 0:
db_cursor = self.db_connection.cursor()
sql = "SELECT playlist_uid FROM playlist WHERE playlist_name = ?"
db_results = db_cursor.execute(sql, [playlist_name])
for row in db_results:
pl_object = row[0]
break
return pl_object
def songs_for_query(self, sql, query_args=None):
result_songs = []
db_cursor = self.db_connection.cursor()
if query_args is not None:
db_results = db_cursor.execute(sql, query_args)
else:
db_results = db_cursor.execute(sql)
for row in db_results:
song = SongMetadata()
song.fm = FileMetadata()
song.fm.file_uid = row[0]
song.fm.file_time = row[1]
song.fm.origin_file_size = row[2]
song.fm.stored_file_size = row[3]
song.fm.pad_char_count = row[4]
song.artist_name = row[5]
song.artist_uid = row[6]
song.song_name = row[7]
song.fm.md5_hash = row[8]
song.fm.compressed = row[9]
song.fm.encrypted = row[10]
song.fm.container_name = row[11]
song.fm.object_name = row[12]
song.album_uid = row[13]
result_songs.append(song)
return result_songs
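    # --- Hedged usage sketch (not part of the original module) ---
    # The handler can be used explicitly or as a context manager; the database
    # file name below is just an example.
    #
    # db = JukeboxDB('jukebox_db.sqlite3')
    # if db.open():                       # creates the tables on first use
    #     print(db.get_playlist('road trip'))
    #     db.close()
    #
    # with JukeboxDB('jukebox_db.sqlite3') as db:
    #     pl = db.get_playlist('road trip')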
def retrieve_song(self, file_name):
if self.db_connection is not None:
sql = """SELECT song_uid,
file_time,
origin_file_size,
stored_file_size,
pad_char_count,
artist_name,
|
lrosengreen/cloudberry
|
server.py
|
Python
|
gpl-3.0
| 2,533
| 0.006711
|
#!/usr/bin/env python
from __future__ import division, print_function
# cloudberryCam v0 copyright (c) 2013-2015 Lars Rosengreen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import os.path
import socket
import cherrypy
from cherrypy.lib.static import serve_file
current_dir = os.path.dirname(os.path.abspath(__file__))
class Root:
@cherrypy.expose
def index(self):
return serve_file(os.path.join(current_dir,"static/viewer.html"))
# API
class FreeSpace:
exposed = True
def GET(self):
s = os.statvfs('/')
free_space = (s.f_bavail * s.f_frsize) / 1.0e9 # in gigabytes
return json.dumps(free_space)
cherrypy.tree.mount(FreeSpace(),
'/api/freespace',
{'/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}})
class Hostname:
exposed = True
def GET(self):
hostname = socket.gethostname()
return json.dumps(hostname)
cherrypy.tree.mount(Hostname(),
'/api/hostname',
{'/': {'request.dispatch': cherrypy.dispatch.MethodDispatcher()}})
def run(testing=False):
# Set up site-wide
|
config first so we get a log if errors occur.
cherrypy.config.update({'environment': 'production',
'log.error_file': 'site.log',
'log.screen': False})
    conf = {'/previews': {'tools.staticdir.on': True,
'tools.staticdir.dir': '/mnt/ramdisk/previews'},
'/static': {'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(current_dir, 'static')}}
cherrypy.server.socket_host = '0.0.0.0'
#cherrypy.server.socket_host = '::' # for Mac (uses IPV6)
if testing == True:
cherrypy.engine.autoreload.subscribe()
cherrypy.config.update({'log.screen': True})
cherrypy.quickstart(Root(), '/', config=conf)
if __name__ == '__main__':
run(testing=True)
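# Hedged usage note (not from the original): with CherryPy's default port the
# two JSON endpoints can be exercised from any HTTP client, e.g.
#   GET http://<device-address>:8080/api/freespace  -> free space in GB as JSON
#   GET http://<device-address>:8080/api/hostname   -> host name as JSON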
|
vfulco/twilio_server
|
twiml_server/common/util.py
|
Python
|
gpl-3.0
| 541
| 0
|
import types
from flask import jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from twiml_server import app
def make_json_app():
def make_json_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
for code in default_exceptions.iterkeys():
        app.error_handler_spec[None][code] = make_json_error
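# --- Hedged usage sketch (not part of the original module) ---
# Calling make_json_app() once at startup turns every registered HTTP error
# into a JSON response, e.g. (behaviour sketched, not verified):
#
# make_json_app()
# # a request to an unknown route now returns a 404 with a JSON body such as
# # {"message": "404 Not Found: ..."}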
|
badele/home-assistant
|
tests/components/light/test_mqtt.py
|
Python
|
mit
| 5,134
| 0
|
"""
tests.components.light.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests mqtt light.
config for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
"""
import unittest
import homeassistant.util.color as color_util
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.core as ha
import homeassistant.components.light as light
from tests.common import mock_mqtt_component, fire_mqtt_message
class TestLightMQTT(unittest.TestCase):
""" Test the MQTT light. """
def setUp(self): # pylint: disable=invalid-name
        self.hass = ha.HomeAssistant()
self.mock_publish = mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down s
|
tuff we started. """
self.hass.stop()
def test_controlling_state_via_topic(self):
self.assertTrue(light.setup(self.hass, {
'light': {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test_light_rgb/status',
'command_topic': 'test_light_rgb/set',
'brightness_state_topic': 'test_light_rgb/brightness/status',
'brightness_command_topic': 'test_light_rgb/brightness/set',
'rgb_state_topic': 'test_light_rgb/rgb/status',
'rgb_command_topic': 'test_light_rgb/rgb/set',
'qos': 0,
'payload_on': 'on',
'payload_off': 'off'
}
}))
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
fire_mqtt_message(self.hass, 'test_light_rgb/status', 'on')
self.hass.pool.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
fire_mqtt_message(self.hass, 'test_light_rgb/status', 'off')
self.hass.pool.block_till_done()
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
fire_mqtt_message(self.hass, 'test_light_rgb/status', 'on')
self.hass.pool.block_till_done()
fire_mqtt_message(self.hass, 'test_light_rgb/brightness/status', '100')
self.hass.pool.block_till_done()
light_state = self.hass.states.get('light.test')
self.hass.pool.block_till_done()
self.assertEqual(100,
light_state.attributes['brightness'])
fire_mqtt_message(self.hass, 'test_light_rgb/status', 'on')
self.hass.pool.block_till_done()
fire_mqtt_message(self.hass, 'test_light_rgb/rgb/status',
'125,125,125')
self.hass.pool.block_till_done()
light_state = self.hass.states.get('light.test')
self.assertEqual([125, 125, 125],
light_state.attributes.get('rgb_color'))
def test_sending_mqtt_commands_and_optimistic(self):
self.assertTrue(light.setup(self.hass, {
'light': {
'platform': 'mqtt',
'name': 'test',
'command_topic': 'test_light_rgb/set',
'brightness_state_topic': 'test_light_rgb/brightness/status',
'brightness_command_topic': 'test_light_rgb/brightness/set',
'rgb_state_topic': 'test_light_rgb/rgb/status',
'rgb_command_topic': 'test_light_rgb/rgb/set',
'qos': 2,
'payload_on': 'on',
'payload_off': 'off'
}
}))
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
light.turn_on(self.hass, 'light.test')
self.hass.pool.block_till_done()
self.assertEqual(('test_light_rgb/set', 'on', 2),
self.mock_publish.mock_calls[-1][1])
state = self.hass.states.get('light.test')
self.assertEqual(STATE_ON, state.state)
light.turn_off(self.hass, 'light.test')
self.hass.pool.block_till_done()
self.assertEqual(('test_light_rgb/set', 'off', 2),
self.mock_publish.mock_calls[-1][1])
state = self.hass.states.get('light.test')
self.assertEqual(STATE_OFF, state.state)
|
yslin/tools-zodlin
|
ubuntu/vim/.vim/lang/all/ultisnips/test/test_Mirror.py
|
Python
|
apache-2.0
| 8,787
| 0.000114
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
class TextTabStopTextAfterTab_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 Hinten\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo Hinten\nhallo'
class TextTabStopTextBeforeTab_ExpectCorrectResult(_VimTest):
snippets = ('test', 'Vorne $1\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'Vorne hallo\nhallo'
class TextTabStopTextSurroundedTab_ExpectCorrectResult(_VimTest):
snippets = ('test', 'Vorne $1 Hinten\n$1')
keys = 'test' + EX + 'hallo test'
wanted = 'Vorne hallo test Hinten\nhallo test'
class TextTabStopTextBeforeMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\nVorne $1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nVorne hallo'
class TextTabStopAfterMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1 Hinten')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nhallo Hinten'
class TextTabStopSurroundMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\nVorne $1 Hinten')
keys = 'test' + EX + 'hallo welt'
wanted = 'hallo welt\nVorne hallo welt Hinten'
class TextTabStopAllSurrounded_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ObenVorne $1 ObenHinten\nVorne $1 Hinten')
keys = 'test' + EX + 'hallo welt'
wanted = 'ObenVorne hallo welt ObenHinten\nVorne hallo welt Hinten'
class MirrorBeforeTabstopLeave_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 ${1:this is it} $1')
keys = 'test' + EX
wanted = 'this is it this is it this is it'
class MirrorBeforeTabstopOverwrite_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 ${1:this is it} $1')
keys = 'test' + EX + 'a'
wanted = 'a a a'
class TextTabStopSimpleMirrorMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nhallo'
class SimpleMirrorMultilineMany_ExpectCorrectResult(_VimTest):
snippets = ('test', ' $1\n$1\na$1b\n$1\ntest $1 mich')
keys = 'test' + EX + 'hallo'
wanted = ' hallo\nhallo\nahallob\nhallo\ntest hallo mich'
class MultilineTabStopSimpleMirrorMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n\n$1\n\n$1')
keys = 'test' + EX + 'hallo Du\nHi'
wanted = 'hallo Du\nHi\n\nhallo Du\nHi\n\nhallo Du\nHi'
class MultilineTabStopSimpleMirrorMultiline1_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1\n$1')
keys = 'test' + EX + 'hallo Du\nHi'
wanted = 'hallo Du\nHi\nhallo Du\nHi\nhallo Du\nHi'
class MultilineTabStopSimpleMirrorDeleteInLine_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1\n$1')
keys = 'test' + EX + 'hallo Du\nHi\b\bAch Blah'
wanted = 'hallo Du\nAch Blah\nhallo Du\nAch Blah\nhallo Du\nAch Blah'
class TextTabStopSimpleMirrorMultilineMirrorInFront_ECR(_VimTest):
snippets = ('test', '$1\n${1:sometext}')
keys = 'test' + EX + 'hallo\nagain'
wanted = 'hallo\nagain\nhallo\nagain'
class SimpleMirrorDelete_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo\b\b'
wanted = 'hal\nhal'
class SimpleMirrorSameLine_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo hallo'
class SimpleMirrorSameLine_InText_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1')
keys = 'ups test blah' + ESC + '02f i' + EX + 'hallo'
wanted = 'ups hallo hallo blah'
class SimpleMirrorSameLineBeforeTabDefVal_ECR(_VimTest):
snippets = ('test', '$1 ${1:replace me}')
keys = 'test' + EX + 'hallo foo'
wanted = 'hallo foo hallo foo'
class SimpleMirrorSameLineBeforeTabDefVal_DelB4Typing_ECR(_VimTest):
snippets = ('test', '$1 ${1:replace me}')
keys = 'test' + EX + BS + 'hallo foo'
wanted = 'hallo foo hallo foo'
class SimpleMirrorSameLineMany_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1 $1 $1')
keys = 'test' + EX + 'hallo du'
wanted = 'hallo du hallo du hallo du hallo du'
class SimpleMirrorSameLineManyMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1 $1 $1')
keys = 'test' + EX + 'hallo du\nwie gehts'
wanted = 'hallo du\nwie gehts hallo du\nwie gehts hallo du\nwie gehts' \
' hallo du\nwie gehts'
class SimpleMirrorDeleteSomeEnterSome_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo\b\bhups'
wanted = 'halhups\nhalhups'
class SimpleTabstopWithDefaultSimpelType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:defa}\n$1')
keys = 'test' + EX + 'world'
wanted = 'ha world\nworld'
class SimpleTabstopWithDefaultComplexType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:default value} $1\nanother: $1 mirror')
keys = 'test' + EX + 'world'
wanted = 'ha world world\nanother: world mirror'
class SimpleTabstopWithDefaultSimpelKeep_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:defa}\n$1')
keys = 'test' + EX
wanted = 'ha defa\ndefa'
class SimpleTabstopWithDefaultComplexKeep_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:default value} $1\nanother: $1 mirror')
keys = 'test' + EX
wanted = 'ha default value default value\nanother: default value mirror'
class TabstopWithMirrorManyFromAll_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $5 ${1:blub} $4 $0 ${2:$1.h} $1 $3 ${4:More}')
keys = 'test' + EX + 'hi' + JF + 'hu' + JF + 'hub' + JF + 'hulla' + \
JF + 'blah' + JF + 'end'
wanted = 'ha blah hi hulla end hu hi hub hulla'
class TabstopWithMirrorInDefaultNoType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:blub} ${2:$1.h}')
keys = 'test' + EX
wanted = 'ha blub blub.h'
class TabstopWithMirrorInDefaultNoType1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:blub} ${2:$1}')
keys = 'test' + EX
wanted = 'ha blub blub'
class TabstopWithMirrorInDefaultTwiceAndExtra_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1.h $1.c}\ntest $1')
keys = 'test' + EX + 'stdin'
wanted = 'ha stdin stdin.h stdin.c\ntest stdin'
class TabstopWithMirrorInDefaultMultipleLeave_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:snip} ${3:$1.h $2}')
keys = 'test' + EX + 'stdin'
wanted = 'ha stdin snip stdin.h snip'
class TabstopWithMirrorInDefaultMultipleOverwrite_ExpectCorrectResult(
_VimTest):
snippets = ('test', 'ha $1 ${2:snip} ${3:$1.h $2}')
keys = 'test' + EX + 'stdin' + JF + 'do snap'
wanted = 'ha stdin do snap stdin.h do snap'
class TabstopWithMirrorInDefaultOverwrite_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1.h}')
keys = 'test' + EX + 'stdin' + JF + 'overwritten'
wanted = 'ha stdin overwritten'
class TabstopWithMirrorInDefaultOverwrite1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1}')
keys = 'test' + EX + 'stdin' + JF + 'overwritten'
wanted = 'ha stdin overwritten'
class TabstopWithMirrorInDefaultNoOverwrite1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1}')
    keys = 'test' + EX + 'stdin' + JF + JF + 'end'
wanted = 'ha stdin stdinend'
class MirrorRealLifeExample_ExpectCorrectResult(_VimTest):
snippets = (
('for', 'for(size_t ${2:i} = 0; $2 < ${1:count}; ${3:++$2})'
'\n{\n\t${0:/* code */}\n}'),
)
keys = 'for' + EX + '100' + JF + 'avar\b\b\b\ba_variable' + JF + \
        'a_variable *= 2' + JF + '// do nothing'
wanted = """for(size_t a_variable = 0; a_variable < 100; a_variable *= 2)
{
\t// do nothing
}"""
class Mirror_TestKill_InsertBefore_NoKill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'wihi' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch hinoauch_end'
class Mirror_TestKill_InsertAfter_NoKill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'eiab' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch noauchab_end'
cla
|
GoogleCloudPlatform/python-docs-samples
|
functions/billing/main.py
|
Python
|
apache-2.0
| 7,657
| 0.000522
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START functions_billing_limit]
# [START functions_billing_limit_appengine]
# [START functions_billing_stop]
# [START functions_billing_slack]
import base64
import json
import os
# [END functions_billing_stop]
# [END functions_billing_limit]
# [END functions_billing_limit_appengine]
# [END functions_billing_slack]
# [START functions_billing_limit]
# [START functions_billing_limit_appengine]
# [START functions_billing_stop]
from googleapiclient import discovery
# [END functions_billing_stop]
# [END functions_billing_limit]
# [END functions_billing_limit_appengine]
# [START functions_billing_slack]
import slack
from slack.errors import SlackApiError
# [END functions_billing_slack]
# [START functions_billing_limit]
# [START functions_billing_stop]
PROJECT_ID = os.getenv('GCP_PROJECT')
PROJECT_NAME = f'projects/{PROJECT_ID}'
# [END functions_billing_stop]
# [END functions_billing_limit]
# [START functions_billing_slack]
# See https://api.slack.com/docs/token-types#bot for more info
BOT_ACCESS_TOKEN = 'xxxx-111111111111-abcdefghidklmnopq'
CHANNEL = 'C0XXXXXX'
slack_client = slack.WebClient(token=BOT_ACCESS_TOKEN)
def notify_slack(data, context):
pubsub_message = data
# For more information, see
# https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#notification_format
try:
notification_attr = json.dumps(pubsub_message['attributes'])
except KeyError:
notification_attr = "No attributes passed in"
try:
notification_data = base64.b64decode(data['data']).decode('utf-8')
except KeyError:
notification_data = "No data passed in"
# This is just a quick dump of the budget data (or an empty string)
# You can modify and format the message to meet your needs
budget_notification_text = f'{notification_attr}, {notification_data}'
try:
slack_client.api_call(
'chat.postMessage',
json={
'channel': CHANNEL,
'text' : budget_notification_text
}
)
except SlackApiError:
print('Error posting to Slack')
# [END functions_billing_slack]
# [START functions_billing_stop]
def stop_billing(data, context):
pubsub_data = base64.b64decode(data['data']).decode('utf-8')
pubsub_json = json.loads(pubsub_data)
cost_amount = pubsub_json['costAmount']
budget_amount = pubsub_json['budgetAmount']
if cost_amount <= budget_amount:
print(f'No action necessary. (Current cost: {cost_amount})')
return
if PROJECT_ID is None:
print('No project specified with environment variable')
return
billing = discovery.build(
'cloudbilling',
'v1',
cache_discovery=False,
)
projects = billing.projects()
billing_enabled = __is_billing_enabled(PROJECT_NAME, projects)
if billing_enabled:
__disable_billing_for_project(PROJECT_NAME, projects)
else:
print('Billing already disabled')
def __is_billing_enabled(project_name, projects):
"""
Determine whether billing is enabled for a project
@param {string} project_name Name of project to check if billing is enabled
@return {bool} Whether project has billing enabled or not
"""
try:
res = projects.getBillingInfo(name=project_name).execute()
return res['billingEnabled']
except KeyError:
# If billingEnabled isn't part of the return, billing is not enabled
return False
except Exception:
print('Unable to determine if billing is enabled on specified project, assuming billing is enabled')
return True
def __disable_billing_for_project(project_name, projects):
"""
Disable billing for a project by removing its billing account
    @param {string} project_name Name of project to disable billing on
"""
body = {'billingAccountName': ''} # Disable billing
try:
res = projects.updateBillingInfo(name=project_name, body=body).execute()
print(f'Billing disabled: {json.dumps(res)}')
except Exception:
print('Failed to disable billing, possibly check permissions')
# [END functions_billing_stop]
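# Illustrative only (not part of the original sample): the shape of the
# Pub/Sub payload that stop_billing() above and limit_use() below expect
# before their base64/JSON decoding step. The field names come from the code
# itself; the numeric values here are made up.
_example_budget_event = {
    'data': base64.b64encode(json.dumps({
        'costAmount': 120.0,
        'budgetAmount': 100.0,
    }).encode('utf-8')),
}
# json.loads(base64.b64decode(_example_budget_event['data']).decode('utf-8'))
# would yield {'costAmount': 120.0, 'budgetAmount': 100.0}.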
# [START functions_billing_limit]
ZONE = 'us-west1-b'
def limit_use(data, context):
pubsub_data = base64.b64decode(data['data']).decode('utf-8')
pubsub_json = json.loads(pubsub_data)
cost_amount = pubsub_json['costAmount']
budget_amount = pubsub_json['budgetAmount']
if cost_amount <= budget_amount:
print(f'No action necessary. (Current cost: {cost_amount})')
return
compute = discovery.build(
'compute',
'v1',
cache_discovery=False,
)
instances = compute.instances()
instance_names = __list_running_instances(PROJECT_ID, ZONE, instances)
__stop_instances(PROJECT_ID, ZONE, instance_names, instances)
def __list_running_instances(project_id, zone, instances):
"""
@param {string} project_id ID of project that contains instances to stop
@param {string} zone Zone that contains instances to stop
@return {Promise} Array of names of running instances
"""
res = instances.list(project=project_id, zone=zone).execute()
if 'items' not in res:
return []
items = res['items']
running_names = [i['name'] for i in items if i['status'] == 'RUNNING']
return running_names
def __stop_instances(project_id, zone, instance_names, instances):
"""
@param {string} project_id ID of project that contains instances to stop
@param {string} zone Zone that contains instances to stop
@param {Array} instance_names Names of instance to stop
@return {Promise} Response from stopping instances
"""
if not len(instance_names):
print('No running instances were found.')
return
for name in instance_names:
instances.stop(
project=project_id,
zone=zone,
instance=name).execute()
print(f'Instance stopped successfully: {name}')
# [END functions_billing_limit]
# [START functions_billing_limit_appengine]
APP_NAME = os.getenv('GCP_PROJECT')
def limit_use_appengine(data, context):
pubsub_data = base64.b64decode(data['data']).decode('utf-8')
pubsub_json = json.loads(pubsub_data)
cost_amount = pubsub_json['costAmount']
budget_amount = pubsub_json['budgetAmount']
if cost_amount <= budget_amount:
print(f'No action necessary. (Current cost: {cost_amount})')
return
appengine = discovery.build(
'appengine',
'v1',
cache_discovery=False
)
apps = appengine.apps()
# Get the target app's serving status
target_app = apps.get(appsId=APP_NAME).execute()
current_status = target_app['servingStatus']
# Disable target app, if necessary
if current_status == 'SERVING':
print(f'Attempting to disable app {APP_NAME}...')
body = {'servingStatus': 'USER_DISABLED'}
apps.patch(appsId=APP_NAME, updateMask='serving_status', body=body).execute()
# [END functions_billing_limit_appengine]
|
JohnRandom/django-aggregator
|
dasdocc/conf/staging.py
|
Python
|
bsd-3-clause
| 29
| 0.034483
|
from dasdocc.conf import base
|
austinwagner/sublime-sourcepawn
|
watchdog/events.py
|
Python
|
mit
| 18,129
| 0.006619
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.events
:synopsis: File system events and event handlers.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Event Classes
-------------
.. autoclass:: FileSystemEvent
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: FileSystemMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileMovedEvent
:members:
:show-inheritance:
.. autoclass:: DirMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileModifiedEvent
:members:
:show-inheritance:
.. autoclass:: DirModifiedEvent
:members:
:show-inheritance:
.. autoclass:: FileCreatedEvent
:members:
:show-inheritance:
.. autoclass:: DirCreatedEvent
:members:
:show-inheritance:
.. autoclass:: FileDeletedEvent
:members:
:show-inheritance:
.. autoclass:: DirDeletedEvent
:members:
:show-inheritance:
Event Handler Classes
---------------------
.. autoclass:: FileSystemEventHandler
:members:
:show-inheritance:
.. autoclass:: PatternMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: RegexMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: LoggingEventHandler
:members:
:show-inheritance:
"""
import os.path
import logging
import re
from pathtools.path import absolute_path
from pathtools.patterns import match_any_paths
from watchdog.utils import has_attribute
EVENT_TYPE_MOVED = 'moved'
EVENT_TYPE_DELETED = 'deleted'
EVENT_TYPE_CREATED = 'created'
EVENT_TYPE_MODIFIED = 'modified'
class FileSystemEvent(object):
"""
Immutable type that represents a file system event that is triggered
when a change occurs on the monitored file system.
All FileSystemEvent objects are required to be immutable and hence
can be used as keys in dictionaries or be added to sets.
"""
def __init__(self, event_type, src_path, is_directory=False):
self._src_path = src_path
self._is_directory = is_directory
self._event_type = event_type
@property
def is_directory(self):
"""True if event was emitted for a directory; False otherwise."""
return self._is_directory
@property
def src_path(self):
"""Source path of the file system object that triggered this event."""
return self._src_path
@property
def event_type(self):
"""The type of the event as a string."""
return self._event_type
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<%(class_name)s: event_type=%(event_type)s, \
src_path=%(src_path)s, is_directory=%(is_directory)s>" %\
dict(class_name=self.__class__.__name__,
event_type=self.event_type,
src_path=self.src_path,
is_directory=self.is_directory)
# Used for comparison of events.
@property
def key(self):
return (self.event_type,
self.src_path,
self.is_directory)
def __eq__(self, event):
return self.key == event.key
def __ne__(self, event):
return self.key != event.key
def __hash__(self):
        return hash(self.key)
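# A small illustration (not part of the original module): equality and hashing
# are derived from `key`, so two events describing the same change compare
# equal and collapse when added to a set.
#
#     >>> e1 = FileSystemEvent(EVENT_TYPE_MODIFIED, '/tmp/a.txt')
#     >>> e2 = FileSystemEvent(EVENT_TYPE_MODIFIED, '/tmp/a.txt')
#     >>> e1 == e2, len({e1, e2})
#     (True, 1)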
class FileSystemMovedEvent(FileSystemEvent):
"""
File system event representing any kind of file system movement.
"""
def __init__(self, src_path, dest_path, is_directory):
super(FileSystemMovedEvent, self).__init__(event_type=EVENT_TYPE_MOVED,
src_path=src_path,
is_directory=is_directory)
self._dest_path = dest_path
@property
def dest_path(self):
"""The destination path of the move event."""
return self._dest_path
# Used for hashing this as an immutable object.
def _key(self):
return (self.event_type,
self.src_path,
self.dest_path,
self.is_directory)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s, dest_path=%(dest_path)s, \
is_directory=%(is_directory)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path,
is_directory=self.is_directory)
# File events.
class FileDeletedEvent(FileSystemEvent):
"""File system event representing file deletion on the file system."""
def __init__(self, src_path):
super(FileDeletedEvent, self).__init__(event_type=EVENT_TYPE_DELETED,
src_path=src_path)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileModifiedEvent(FileSystemEvent):
"""File system event representing file modification on the file system."""
def __init__(self, src_path):
super(FileModifiedEvent, self).__init__(event_type=EVENT_TYPE_MODIFIED,
src_path=src_path)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileCreatedEvent(FileSystemEvent):
"""File system event representing file creation on the file system."""
def __init__(self, src_path):
super(FileCreatedEvent, self).__init__(event_type=EVENT_TYPE_CREATED,
src_path=src_path)
def __repr__(self):
return "<%
|
(class_name)s: src_path=%(src_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileMovedEvent(FileSystemMovedEvent):
"""File system event representing file movement on the file system."""
def __init__(self, src_path, dest_path):
super(FileMovedEvent, self).__init__(src_path=src_path,
dest_path=dest_path,
is_directory=False)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s, \
dest_path=%(dest_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path)
# Directory events.
class DirDeletedEvent(FileSystemEvent):
"""File system event representing directory deletion on the file system."""
def __init__(self, src_path):
super(DirDeletedEvent, self).__init__(event_type=EVENT_TYPE_DELETED,
src_path=src_path,
is_directory=True)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class DirModifiedEvent(FileSystemEvent):
"""
File system event representing directory modification on the file system.
"""
def __init__(self, src_path):
super(DirModifiedEvent, self).__init__(event_type=EVENT_TYPE_MODIFIED,
src_path=src_path,
is_directory=True)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)s>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class DirCreatedEvent(FileSystemEvent):
"""File system event representing directory creation on the file system."""
def __init__(self, src_path):
super(DirCreatedEvent, self).__init__(event_type=EVENT_TYPE_CREATED,
src_path=src_path,
is_di
|
thonkify/thonkify
|
src/lib/libpasteurize/fixes/fix_kwargs.py
|
Python
|
mit
| 6,066
| 0.001813
|
u"""
Fixer for Python 3 function parameter syntax
This fixer is rather sensitive to incorrect py3k syntax.
"""
# Note: "relevant" parameters are parameters following the first STAR in the list.
from lib2to3 import fixer_base
from lib2to3.fixer_util import token, String, Newline, Comma, Name
from libfuturize.fixer_util import indentation, suitify, DoubleStar
_assign_template = u"%(name)s = %(kwargs)s['%(name)s']; del %(kwargs)s['%(name)s']"
_if_template = u"if '%(name)s' in %(kwargs)s: %(assign)s"
_else_template = u"else: %(name)s = %(default)s"
_kwargs_default_name = u"_3to2kwargs"
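# Illustrative sketch (not part of the fixer): given Python 3 input such as
#
#     def f(a, *, b, c=1):
#         pass
#
# the templates above are used to rewrite the signature into roughly
#
#     def f(a, **_3to2kwargs):
#         if 'c' in _3to2kwargs: c = _3to2kwargs['c']; del _3to2kwargs['c']
#         else: c = 1
#         b = _3to2kwargs['b']; del _3to2kwargs['b']
#         pass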
def gen_params(raw_params):
u"""
Generator that yields tuples of (name, default_value) for each parameter in the list
    If no default is given, then default_value is None (not Leaf(token.NAME, 'None'))
"""
assert raw_params[0].type == token.STAR and len(raw_params) > 2
curr_idx = 2 # the first place a keyword-only parameter name can be is index 2
max_idx = len(raw_params)
while curr_idx < max_idx:
curr_item = raw_params[curr_idx]
prev_item = curr_item.prev_sibling
if curr_item.type != token.NAME:
            curr_idx += 1
continue
if prev_item is not None and prev_item.type == token.DOUBLESTAR:
break
name = curr_item.value
nxt = curr_item.next_sibling
if nxt is not None and nxt.type == token.EQUAL:
default_value = nxt.next_sibling
curr_idx += 2
else:
default_value = None
yield (name, default_value)
curr_idx += 1
def remove_params(raw_params, kwargs_default=_kwargs_default_name):
u"""
Removes all keyword-only args from the params list and a bare star, if any.
Does not add the kwargs dict if needed.
Returns True if more action is needed, False if not
(more action is needed if no kwargs dict exists)
"""
assert raw_params[0].type == token.STAR
if raw_params[1].type == token.COMMA:
raw_params[0].remove()
raw_params[1].remove()
kw_params = raw_params[2:]
else:
kw_params = raw_params[3:]
for param in kw_params:
if param.type != token.DOUBLESTAR:
param.remove()
else:
return False
else:
return True
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
            # Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
class FixKwargs(fixer_base.BaseFix):
run_order = 7 # Run after function annotations are removed
PATTERN = u"funcdef< 'def' NAME parameters< '(' arglist=typedargslist< params=any* > ')' > ':' suite=any >"
def transform(self, node, results):
params_rawlist = results[u"params"]
for i, item in enumerate(params_rawlist):
if item.type == token.STAR:
params_rawlist = params_rawlist[i:]
break
else:
return
# params is guaranteed to be a list starting with *.
# if fixing is needed, there will be at least 3 items in this list:
# [STAR, COMMA, NAME] is the minimum that we need to worry about.
new_kwargs = needs_fixing(params_rawlist)
# new_kwargs is the name of the kwargs dictionary.
if not new_kwargs:
return
suitify(node)
# At this point, params_rawlist is guaranteed to be a list
# beginning with a star that includes at least one keyword-only param
# e.g., [STAR, NAME, COMMA, NAME, COMMA, DOUBLESTAR, NAME] or
# [STAR, COMMA, NAME], or [STAR, COMMA, NAME, COMMA, DOUBLESTAR, NAME]
# Anatomy of a funcdef: ['def', 'name', parameters, ':', suite]
# Anatomy of that suite: [NEWLINE, INDENT, first_stmt, all_other_stmts]
# We need to insert our new stuff before the first_stmt and change the
# first_stmt's prefix.
suite = node.children[4]
first_stmt = suite.children[2]
ident = indentation(first_stmt)
for name, default_value in gen_params(params_rawlist):
if default_value is None:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_assign_template % {u'name': name, u'kwargs': new_kwargs}, prefix=ident))
else:
suite.insert_child(2, Newline())
suite.insert_child(2, String(_else_template % {u'name': name, u'default': default_value}, prefix=ident))
suite.insert_child(2, Newline())
suite.insert_child(2, String(
_if_template % {u'assign': _assign_template % {u'name': name, u'kwargs': new_kwargs}, u'name': name,
u'kwargs': new_kwargs}, prefix=ident))
first_stmt.prefix = ident
suite.children[2].prefix = u""
# Now, we need to fix up the list of params.
must_add_kwargs = remove_params(params_rawlist)
if must_add_kwargs:
arglist = results[u'arglist']
if len(arglist.children) > 0 and arglist.children[-1].type != token.COMMA:
arglist.append_child(Comma())
arglist.append_child(DoubleStar(prefix=u" "))
arglist.append_child(Name(new_kwargs))
|
txomon/vdsm
|
vdsm/network/tc/qdisc.py
|
Python
|
gpl-2.0
| 4,910
| 0
|
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from fractions import Fraction
from functools import partial
from . import _parser
from . import _wrapper
_TC_PRIO_MAX = 15
def add(dev, kind, parent=None, handle=None, **opts):
command = ['qdisc', 'add', 'dev', dev]
if kind != 'ingress':
if parent is None:
command.append('root')
else:
command += ['parent', parent]
if handle is not None:
command += ['handle', handle]
command.append(kind)
for key, value in opts.items():
command += [key, value]
_wrapper.process_request(command)
def delete(dev, kind=None, parent=None, handle=None, **opts):
command = ['qdisc', 'del', 'dev', dev]
if kind != 'ingress':
if parent is None:
command.append('root')
else:
command += ['parent', parent]
if handle is not None:
command += ['handle', handle]
if kind is not None:
command.append(kind)
for key, value in opts.items():
command += [key, value]
_wrapper.process_request(command)
def replace(dev, kind, parent=None, handle=None, **opts):
    command = ['qdisc', 'replace', 'dev', dev]
if kind != 'ingress':
if parent is None:
command.append('root')
else:
command += ['parent', parent]
if handle is not None:
command += ['handle', handle]
command.append(kind)
for key, value in opts.items():
command += [key, value]
_wrapper.process_request(command)
def show(dev=None):
command = ['qdisc', 'show']
if dev:
        command += ['dev', dev]
return _wrapper.process_request(command)
def parse(tokens):
"""Takes a token generator and returns a dictionary of general qdisc
attributes and kind (kernel's TCA_KIND) specific attributes"""
kind = next(tokens)
data = {'kind': kind, 'handle': next(tokens)}
for token in tokens:
if token == 'root':
data[token] = _parser.parse_true(tokens)
elif token in ('dev', 'parent'):
data[token] = _parser.parse_str(tokens)
elif token == 'refcnt':
data[token] = _parser.parse_int(tokens)
else:
# Finished with general qdisc attrs. Loop for kind attrs
spec_parser = _spec.get(kind, ())
while True:
if token in spec_parser:
value = spec_parser[token](tokens)
try:
data[kind][token] = value
except KeyError:
data[kind] = {token: value}
else:
pass # Consume anything that we don't know how to parse
try:
token = next(tokens)
except StopIteration:
break
return data
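# Rough illustration (not part of the original module, and the exact _parser
# semantics are assumed): for a token stream taken from one line of
# `tc qdisc show` output such as
#     fq_codel 801e: dev em1 root refcnt 2 limit 10240p flows 1024
# parse() would return something along the lines of
#     {'kind': 'fq_codel', 'handle': '801e:', 'dev': 'em1', 'root': True,
#      'refcnt': 2, 'fq_codel': {'limit': 10240, 'flows': 1024}}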
def _parse_limit(tokens):
return int(next(tokens)[:-1]) # leave off the trailing 'p'
def _parse_sfq_flows(tokens):
    return Fraction(*[int(el) for el in next(tokens).split('/')])
def _parse_pfifo_fast_priomap(tokens):
return [int(next(tokens)) for _ in range(_TC_PRIO_MAX)]
_spec = {
'fq_codel': {
'ecn': _parser.parse_true,
'flows': _parser.parse_int,
'interval': _parser.parse_time,
'limit': _parse_limit,
'quantum': _parser.parse_int,
'target': _parser.parse_time,
},
'hfsc': {
'default': partial(_parser.parse_int, base=16),
},
'ingress': {
},
'pfifo_fast': {
'bands': _parser.parse_int,
'multiqueue': _parser.parse_str,
'priomap': _parse_pfifo_fast_priomap,
},
'sfq': {
'depth': _parser.parse_int,
'divisor': _parser.parse_int,
'ecn': _parser.parse_true,
'ewma': _parser.parse_int,
'flows': _parse_sfq_flows,
'headdrop': _parser.parse_true,
'limit': _parse_limit,
'max': _parser.parse_size,
'min': _parser.parse_size,
'perturb': _parser.parse_sec,
'probability': _parser.parse_float,
'quantum': _parser.parse_size,
},
}
|
adexin/Python-Machine-Learning-Samples
|
Logistic_regression/Ecommerce_logpredict/logistic.py
|
Python
|
mit
| 495
| 0.00202
|
import numpy as np
import matplotlib.pyplot as plt
N = 200
D = 2
X = np.random.randn(N, D)
# Bias term
ones = np.array([[1]*N]).T
# Concatenate with input X
Xb = np.concatenate((ones, X), axis=1)
# Randomly initialize w
w = np.random.randn(D)
z = X.dot(w)
def sigmoid(z):
return 1/(1 + np.exp(-z))
def hypertan(z):
return (np.exp(z) - np.exp(-z))/(np.exp(z) + np.exp(-z))
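# Sanity check (not in the original script): for these small activations the
# hand-rolled hypertan matches NumPy's built-in tanh.
assert np.allclose(hypertan(z), np.tanh(z))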
print(sigmoid(z))
plt.plot(sigmoid(z))
plt.show()
print(hypertan(z))
plt.plot(hypertan(z))
plt.show()
|
apple/coremltools
|
coremltools/converters/mil/mil/passes/test_noop_elimination.py
|
Python
|
bsd-3-clause
| 13,225
| 0.001966
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
@pytest.mark.parametrize("op_type, pos, val", itertools.product(['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'], ['x', 'y'], [0, 1, [0, 0, 0, 0], [1, 1, 1, 1]]))
def test_elementwise_elimination(op_type, pos, val):
if 'div' in op_type and np.prod(val) == 0:
return
if 'pow' in op_type and (val != 0 or val != 1):
return
test_op = getattr(mb, op_type)
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
if pos == "x":
r1 = test_op(x=val, y=x)
else:
r1 = test_op(x=x, y=val)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = [op_type, "relu"]
new_program = original_program
if op_type in {'add'}:
if val == 0 or val == [0, 0, 0, 0]:
new_program = ["relu"]
elif op_type in {'mul'}:
if val == 1 or val == [1, 1, 1, 1]:
new_program = ["relu"]
elif op_type in {'real_div'}:
# TODO(rdar://79925291): Remove this branch and add `real_div` to the
# following elif once fp32 casts for `real_div` are no longer required.
original_program = ["cast"] + original_program
new_program = original_program
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["cast", "relu"]
elif op_type in {'pow', 'floor_div'}:
if pos == 'y' and (val == 1 or val == [1, 1, 1, 1]):
new_program = ["relu"]
elif op_type in {'sub'}:
if pos == 'y' and (val == 0 or val == [0, 0, 0, 0]):
new_program = ["relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == new_program
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_elementwise_broadcast():
@mb.program(input_specs=[mb.TensorSpec(shape=[4])])
def prog(x):
r1 = mb.add(x=x, y=[[0, 0, 0, 0], [0, 0, 0, 0]])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
original_program = ["add", "relu"]
assert get_op_types_in_program(prev_prog) == original_program
assert get_op_types_in_program(prog) == original_program
assert_model_is_valid(
prog,
{"x": [4]},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_reshape_elimination():
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.reshape(x=x, shape=[1, 8])
r2 = mb.reshape(x=r1, shape=[1, 8])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"]
assert get_op_types_in_program(prog) == ["reshape", "relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (1, 8)},
)
def test_oneway_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, num_splits=1, axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_full_split_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.split(x=x, split_sizes=[4], axis=-1)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["split", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebysize_to_end_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_slicebyindex_full_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
@pytest.mark.parametrize("begin_mask, end_mask",
itertools.product(itertools.product([True, False],[True, False]),
itertools.product([True, False],[True, False])))
def test_slicebyindex_mask_elimination(begin_mask, end_mask):
@mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
def prog(x):
begin = [1, 1]
end = [1, 1]
for i in range(2):
if not begin_mask[i]:
begin[i] = 0
if not end_mask[i]:
end[i] = 4
r1 = mb.slice_by_index(x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask)
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (4, 4)},
expected_output_shapes={block.outputs[0].name: (4, 4)},
)
def test_pad_elimination():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
def prog(x):
r1 = mb.pad(x=x, pad=[0, 0, 0, 0])
return mb.relu(x=r1)
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::noop_elimination"
)
assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
assert get_op_types_in_program(prog) == ["relu"]
assert_model_is_valid(
prog,
{"x": (2, 4)},
expected_output_shapes={block.outputs[0].name: (2, 4)},
)
def test_keep_pad():
@mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
|
sixpearls/wagtail-storageimage
|
storageimage/models.py
|
Python
|
bsd-2-clause
| 2,687
| 0.00521
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models.fields.files import ImageFieldFile, ImageField
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import pre_save, pre_delete
from PIL import Image as PILImage
from storageimage import settings
class StorageImageFieldFile(ImageFieldFile):
def _get_image_dimensions(self):
# needs to be re-written to actually cache me.
if not hasattr(self, '_dimensions_cache'):
close = self.file.closed
if close:
self.file.open()
else:
file_pos = self.file.tell()
self.file.read() # load the data from remote source
img_pil = PILImage.open(self.file.file)
self._dimensions_cache = img_pil.size
if close:
self.file.close()
else:
self.file.seek(file_pos)
        return self._dimensions_cache
class StorageImageField(ImageField):
attr_class = StorageImageFieldFile
def storage_save(self,*args,**kwargs):
fix_save = True
if self.pk:
instance_ref = self.__class__.objects.get(pk=self.pk)
if instance_ref.file == self.file:
# This is a bad way of checking if the file changed...
fix_save = False
if fix_save:
avail_name = get_upload_to(self, self.file.name)
reopen = not self.file.file.closed
if reopen:
file_loc = self.file.file.tell()
stored = self.file.storage.save(name=avail_name, content=self.file.file)
self.file = self.file.storage.open(stored)
if reopen:
self.file.file.open()
self.file.file.seek(file_loc)
super(self.__class__, self).save(*args,**kwargs)
if settings.AUTO_INJECTION:
from wagtail.wagtailimages.models import get_image_model, get_upload_to
ImageModel = get_image_model()
for i,f in enumerate(ImageModel._meta.local_fields):
if f.name == 'file':
del ImageModel._meta.local_fields[i]
break
StorageImageField(verbose_name=_('File'), upload_to=get_upload_to, width_field='width', height_field='height').contribute_to_class(ImageModel, 'file')
ImageModel.save = storage_save
RenditionModel = get_image_model().renditions.related.model
for i,f in enumerate(RenditionModel._meta.local_fields):
if f.name == 'file':
del RenditionModel._meta.local_fields[i]
break
StorageImageField(verbose_name=_('File'), upload_to=get_upload_to, width_field='width', height_field='height').contribute_to_class(RenditionModel, 'file')
RenditionModel.save = storage_save
|
calatre/epidemics_network
|
models/other/ep2.py
|
Python
|
apache-2.0
| 3,929
| 0.000509
|
# Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 25/3/2017
# The following code has been based on:
# Forest Fire Model from http://scipython.com/blog/the-forest-fire-model/
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import colors
# Displacements from a cell to its eight nearest neighbours
neighbourhood = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
# Possible status of a person - Dead, Healthy (but susceptible), Sick or Immune
DEAD, HEALTH, SICK, IMM = 0, 1, 2, 3
# Colours for visualization: black = dead, green = healthy, red = sick
# and blue = immune.
# Note that apparently for the colormap to work, this list and the bounds list
# must be one larger than the number of different values in the array.
colors_list = ['k', 'g', 'r', 'b', 'b']
cmap = colors.ListedColormap(colors_list)
bounds = [0, 1, 2, 3, 4]
norm = colors.BoundaryNorm(bounds, cmap.N)
# Defining the main function:
def iterate(X):
"""Iterate the map according to the epidemic rules."""
# lottery = np.random.random()
# The boundary of the map is always empty, so only consider cells
# indexed from 1 to nx-2, 1 to ny-2
X1 = np.zeros((ny, nx))
for ix in range(1, nx - 1):
for iy in range(1, ny - 1):
# if empty, probability p of spawning new people
if X[iy, ix] == DEAD and np.random.random() <= p:
X1[iy, ix] = HEALTH
# if sick, probability i of cure and becoming immune
if X[iy, ix] == SICK and np.random.random() <= i:
X1[iy, ix] = IMM
# if sick, probability (s-i) of surviving one more turn
if X[iy, ix] == SICK and np.random.random() <= s:
X1[iy, ix] = SICK
# if you're immune, you're not going anywhere
if X[iy, ix] == IMM: # and np.random.random() >= (1/ept):
X1[iy, ix] = IMM
# if you're healthy the same, except...
if X[iy, ix] == HEALTH:
X1[iy, ix] = HEALTH
# if a neighbour is sick, there's a chance c of contagion
                    for dx, dy in neighbourhood:
                        if X[iy + dy, ix + dx] == SICK and np.random.random() <= c:
X1[iy, ix] = SICK
break
# and once in a while, diseases appear out of nowhere
else:
if np.random.random() <= d:
X1[iy, ix] = SICK
# if not dead, they can die from natural causes
if X[iy, ix] != DEAD and np.random.random() <= (0.01 / ept):
X1[iy, ix] = DEAD
return X1
# The initial fraction of the map occupied by people.
ppl_fraction = 1
# base probabilities: people born, diseases appear, chance of contagion
p, d, c = 0.1, 0.00001, 0.05
# transition probabilities: if sick, subject gets _i_mmune < _s_urvives < dies
i, s = 0.1, 0.9
# age expectancy: people can die of old age
ept = 60
# map size (number of cells in x and y directions).
nx, ny = 100, 100
# Initialize the map grid.
X = np.zeros((ny, nx))
X[1:ny - 1, 1:nx - 1] = np.random.randint(0, 2, size=(ny - 2, nx - 2))
X[1:ny - 1, 1:nx - 1] = np.random.random(size=(ny - 2, nx - 2)) < ppl_fraction
# plotting a single frame
fig = plt.figure(figsize=(25 / 3, 6.25))
ax = fig.add_subplot(111)
ax.set_axis_off()
im = ax.imshow(X, cmap=cmap, norm=norm) # , interpolation='nearest')
# The animation function: called to produce a frame for each generation.
def animate(i):
im.set_data(animate.X)
animate.X = iterate(animate.X)
# Bind our grid to the identifier X in the animate function's namespace.
animate.X = X
# Interval between frames (ms).
interval = 1
anim = animation.FuncAnimation(fig, animate, interval=interval)
plt.show()
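# A minimal non-animated run (not part of the original script): iterate the
# grid a few generations after the animation window is closed and report how
# many cells end up in each state.
X_run = X.copy()
for _ in range(10):
    X_run = iterate(X_run)
print('dead/healthy/sick/immune:',
      [int(np.count_nonzero(X_run == state)) for state in (DEAD, HEALTH, SICK, IMM)])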
|
dunkhong/grr
|
grr/server/grr_response_server/flows/general/registry_init.py
|
Python
|
apache-2.0
| 1,568
| 0
|
#!/usr/bin/env python
"""Load all flows so that they are visible in the registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# pylint: disable=unused-import
# These imports populate the Flow registry
from grr_response_server.flows.general import administrative
from grr_response_server.flows.general import apple_firmware
from grr_response_server.flows.general import artifact_fallbacks
from grr_response_server.flows.general import ca_enroller
from grr_response_server.flows.general import checks
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import discovery
from grr_response_server.flows.general import export
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import filesystem
from grr_response_server.flows.general import filetypes
from grr_response_server.flows.general import find
from grr_response_server.flows.general import fingerprint
from grr_response_server.flows.general import hardware
from grr_response_server.flows.general import memory
from grr_response_server.flows.general import network
from grr_response_server.flows.general import osquery
from grr_response_server.flows.general import processes
from grr_response_server.flows.general import registry
from grr_response_server.flows.general import timeline
from grr_response_server.flows.general import transfer
from grr_response_server.flows.general import webhistory
from grr_response_server.flows.general import windows_vsc
|
rananda/cfme_tests
|
cfme/tests/containers/test_relationships.py
|
Python
|
gpl-2.0
| 4,007
| 0.002496
|
from random import shuffle
import pytest
from utils import testgen
from utils.version import current_version
from cfme.web_ui import paginator, summary_title
from cfme.containers.pod import Pod, paged_tbl as pod_paged_tbl
from cfme.containers.provider import ContainersProvider, paged_tbl as provider_paged_tbl,\
navigate_and_get_rows
from cfme.containers.service import Service, paged_tbl as service_paged_tbl
from cfme.containers.node import Node, list_tbl as node_paged_tbl
from cfme.containers.replicator import Replicator, paged_tbl as replicator_paged_tbl
from cfme.containers.image import Image, paged_tbl as image_paged_tbl
from cfme.containers.project import Project, paged_tbl as project_paged_tbl
from cfme.containers.template import Template, paged_tbl as template_paged_tbl
from cfme.containers.container import Container, paged_tbl as container_paged_tbl
from cfme.containers.image_registry import ImageRegistry, paged_tbl as image_registry_paged_tbl
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')
class DataSet(object):
    def __init__(self, obj, paged_tbl, polarion_id):
self.obj = obj
self.paged_tbl = paged_tbl
pytest.mark.polarion(polarion_id)(self)
TEST_OBJECTS = [
    DataSet(ContainersProvider, provider_paged_tbl, 'CMP-9851'),
DataSet(Container, container_paged_tbl, 'CMP-9947'),
DataSet(Pod, pod_paged_tbl, 'CMP-9929'),
DataSet(Service, service_paged_tbl, 'CMP-10564'),
DataSet(Node, node_paged_tbl, 'CMP-9962'),
DataSet(Replicator, replicator_paged_tbl, 'CMP-10565'),
DataSet(Image, image_paged_tbl, 'CMP-9980'),
DataSet(ImageRegistry, image_registry_paged_tbl, 'CMP-9994'),
DataSet(Project, project_paged_tbl, 'CMP-9868'),
DataSet(Template, template_paged_tbl, 'CMP-10319')
]
def check_relationships(instance):
"""Check the relationships linking & data integrity"""
sum_values = instance.summary.relationships.items().values()
shuffle(sum_values)
for attr in sum_values:
if attr.clickable:
break
else:
return # No clickable object but we still want to pass
link_value = attr.value
attr.click()
if type(link_value) is int:
rec_total = paginator.rec_total()
if rec_total != link_value:
            raise Exception('Difference between the value ({}) in the relationships table in {} '
                            'and the number of records ({}) in the target '
                            'page'.format(link_value, instance.name, rec_total))
else:
assert '(Summary)' in summary_title()
@pytest.mark.parametrize('data_set', TEST_OBJECTS, ids=[obj.obj for obj in TEST_OBJECTS])
def test_relationships_tables(provider, data_set):
"""This test verifies the integrity of the Relationships table.
clicking on each field in the Relationships table takes the user
to either Summary page where we verify that the field that appears
in the Relationships table also appears in the Properties table,
or to the page where the number of rows is equal to the number
that is displayed in the Relationships table.
"""
if current_version() < "5.7" and data_set.obj == Template:
        pytest.skip('Templates do not exist in CFME versions earlier than 5.7. Skipping...')
rows = navigate_and_get_rows(provider, data_set.obj, data_set.paged_tbl, 1)
if not rows:
pytest.skip('No objects to test for relationships for {}'.format(data_set.obj.__name__))
row = rows[-1]
if data_set.obj is Container:
instance = data_set.obj(row.name.text, row.pod_name.text)
elif data_set.obj is ImageRegistry:
instance = data_set.obj(row.host.text, provider)
elif data_set.obj is Image:
instance = data_set.obj(row.name.text, row.tag.text, provider)
else:
instance = data_set.obj(row.name.text, provider)
check_relationships(instance)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/frontend_ip_configuration_py3.py
|
Python
|
mit
| 4,846
| 0.003921
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class FrontendIPConfiguration(SubResource):
"""Frontend IP address of the load balancer.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar inbound_nat_rules: Read only. Inbound rules URIs that use this
frontend IP.
:vartype inbound_nat_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar inbound_nat_pools: Read only. Inbound pools URIs that use this
frontend IP.
:vartype inbound_nat_pools:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar outbound_nat_rules: Read only. Outbound rules URIs that use this
frontend IP.
:vartype outbound_nat_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar load_balancing_rules: Gets load balancing rules URIs that use this
frontend IP.
:vartype load_balancing_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
    :param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The Private IP allocation method.
Possible values are: 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2018_01_01.models.IPAllocationMethod
    :param subnet: The reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2018_01_01.models.Subnet
:param public_ip_address: The reference of the Public IP resource.
:type public_ip_address:
~azure.mgmt.network.v2018_01_01.models.PublicIPAddress
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param zones: A list of availability zones denoting the IP allocated for
the resource needs to come from.
:type zones: list[str]
"""
_validation = {
'inbound_nat_rules': {'readonly': True},
'inbound_nat_pools': {'readonly': True},
'outbound_nat_rules': {'readonly': True},
'load_balancing_rules': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'},
'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'},
'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[SubResource]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, zones=None, **kwargs) -> None:
super(FrontendIPConfiguration, self).__init__(id=id, **kwargs)
self.inbound_nat_rules = None
self.inbound_nat_pools = None
self.outbound_nat_rules = None
self.load_balancing_rules = None
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.zones = zones
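# A minimal usage sketch (an assumption, not taken from the generated code or
# its docs): constructing a dynamically-allocated frontend configuration; the
# Subnet import and the resource ID below are placeholders.
#
#     from azure.mgmt.network.v2018_01_01.models import (
#         FrontendIPConfiguration, Subnet)
#     frontend = FrontendIPConfiguration(
#         name='LoadBalancerFrontEnd',
#         private_ip_allocation_method='Dynamic',
#         subnet=Subnet(id='<subnet-resource-id>'),
#     )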
|
mc3k/graph-stats
|
datagen.py
|
Python
|
mit
| 433
| 0.027714
|
#!/usr/bin/env python
import sys, os, logging, urllib, datetime
def fetchtemp():
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
output = line.split('=')[1].split("'
|
")[0]#+' C'
return output
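# Quick illustration (not part of the original script): the split above turns a
# vcgencmd line such as "temp=42.8'C" into the bare number.
assert "temp=42.8'C".split('=')[1].split("'")[0] == '42.8'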
format = "%Y-%m-%d,%H:%M:%S"
today = datetime.datetime.today()
s = today.strftime(format)
output = s+' '+fetchtemp()+'\n'
with open('/home/pi/bin/plottemp/tempdata.dat', 'a') as f:
f.write(output)
|
robmcmullen/peppy
|
peppy/plugins/spelling.py
|
Python
|
gpl-2.0
| 958
| 0.004175
|
# peppy Copyright (c) 2006-2010 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Spell checking provider
"""
import os, sys
import wx
from wx.lib.pubsub import Publisher
from peppy.yapsy.plugins import *
from peppy.lib.stcspellcheck import *
class SpellCheck(IPeppyPlugin):
"""Plugin for spell check provider
This simple plugin provides the spelling checker for Fundamental mode.
"""
def activateHook(self):
Publisher().subscribe(self.getProvider, 'spelling.provider')
Publisher().subscribe(self.defaultLanguage, 'spelling.default_language')
def deactivateHook(self):
Publisher().unsubscribe(self.getProvider)
Publisher().unsubscribe(self.defaultLanguage)
def getProvider(self, message):
message.data.append(STCSpellCheck)
def defaultLanguage(self, message):
lang = message.data
STCSpellCheck.setDefaultLanguage(lang)
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_fileinput.py
|
Python
|
mit
| 8,498
| 0.004001
|
'''
Tests for fileinput module.
Nick Mathewson
'''
import unittest
from test.support import verbose, TESTFN, run_unittest
from test.support import unlink as safe_unlink
import sys, re
from io import StringIO
from fileinput import FileInput, hook_encoded
# The fileinput module has 2 interfaces: the FileInput class which does
# all the work, and a few functions (input, etc.) that use a global _state
# variable. We only test the FileInput class, since the other functions
# only provide a thin facade over FileInput.
# Write lines (a list of lines) to temp file number i, and return the
# temp file's name.
def writeTmp(i, lines, mode='w'): # opening in text mode is the default
name = TESTFN + str(i)
f = open(name, mode)
for line in lines:
f.write(line)
f.close()
return name
def remove_tempfiles(*names):
for name in names:
if name:
safe_unlink(name)
class BufferSizesTests(unittest.TestCase):
def test_buffer_sizes(self):
# First, run the tests with default and teeny buffer size.
for round, bs in (0, 0), (1, 30):
t1 = t2 = t3 = t4 = None
try:
t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
self.buffer_size_test(t1, t2, t3, t4, bs, round)
finally:
                remove_tempfiles(t1, t2, t3, t4)
def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
        pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
start = 1 + round*6
if verbose:
print('%s. Simple iteration (bs=%s)' % (start+0, bs))
fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
lines = list(fi)
fi.close()
self.assertEqual(len(lines), 31)
self.assertEqual(lines[4], 'Line 5 of file 1\n')
self.assertEqual(lines[30], 'Line 1 of file 4\n')
self.assertEqual(fi.lineno(), 31)
self.assertEqual(fi.filename(), t4)
if verbose:
print('%s. Status variables (bs=%s)' % (start+1, bs))
fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
s = "x"
while s and s != 'Line 6 of file 2\n':
s = fi.readline()
self.assertEqual(fi.filename(), t2)
self.assertEqual(fi.lineno(), 21)
self.assertEqual(fi.filelineno(), 6)
self.failIf(fi.isfirstline())
self.failIf(fi.isstdin())
if verbose:
print('%s. Nextfile (bs=%s)' % (start+2, bs))
fi.nextfile()
self.assertEqual(fi.readline(), 'Line 1 of file 3\n')
self.assertEqual(fi.lineno(), 22)
fi.close()
if verbose:
print('%s. Stdin (bs=%s)' % (start+3, bs))
fi = FileInput(files=(t1, t2, t3, t4, '-'), bufsize=bs)
savestdin = sys.stdin
try:
sys.stdin = StringIO("Line 1 of stdin\nLine 2 of stdin\n")
lines = list(fi)
self.assertEqual(len(lines), 33)
self.assertEqual(lines[32], 'Line 2 of stdin\n')
self.assertEqual(fi.filename(), '<stdin>')
fi.nextfile()
finally:
sys.stdin = savestdin
if verbose:
print('%s. Boundary conditions (bs=%s)' % (start+4, bs))
fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
self.assertEqual(fi.lineno(), 0)
self.assertEqual(fi.filename(), None)
fi.nextfile()
self.assertEqual(fi.lineno(), 0)
self.assertEqual(fi.filename(), None)
if verbose:
print('%s. Inplace (bs=%s)' % (start+5, bs))
savestdout = sys.stdout
try:
fi = FileInput(files=(t1, t2, t3, t4), inplace=1, bufsize=bs)
for line in fi:
line = line[:-1].upper()
print(line)
fi.close()
finally:
sys.stdout = savestdout
fi = FileInput(files=(t1, t2, t3, t4), bufsize=bs)
for line in fi:
self.assertEqual(line[-1], '\n')
m = pat.match(line[:-1])
self.assertNotEqual(m, None)
self.assertEqual(int(m.group(1)), fi.filelineno())
fi.close()
class FileInputTests(unittest.TestCase):
def test_zero_byte_files(self):
t1 = t2 = t3 = t4 = None
try:
t1 = writeTmp(1, [""])
t2 = writeTmp(2, [""])
t3 = writeTmp(3, ["The only line there is.\n"])
t4 = writeTmp(4, [""])
fi = FileInput(files=(t1, t2, t3, t4))
line = fi.readline()
self.assertEqual(line, 'The only line there is.\n')
self.assertEqual(fi.lineno(), 1)
self.assertEqual(fi.filelineno(), 1)
self.assertEqual(fi.filename(), t3)
line = fi.readline()
self.failIf(line)
self.assertEqual(fi.lineno(), 1)
self.assertEqual(fi.filelineno(), 0)
self.assertEqual(fi.filename(), t4)
fi.close()
finally:
remove_tempfiles(t1, t2, t3, t4)
def test_files_that_dont_end_with_newline(self):
t1 = t2 = None
try:
t1 = writeTmp(1, ["A\nB\nC"])
t2 = writeTmp(2, ["D\nE\nF"])
fi = FileInput(files=(t1, t2))
lines = list(fi)
self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
self.assertEqual(fi.filelineno(), 3)
self.assertEqual(fi.lineno(), 6)
finally:
remove_tempfiles(t1, t2)
## def test_unicode_filenames(self):
## # XXX A unicode string is always returned by writeTmp.
## # So is this needed?
## try:
## t1 = writeTmp(1, ["A\nB"])
## encoding = sys.getfilesystemencoding()
## if encoding is None:
## encoding = 'ascii'
## fi = FileInput(files=str(t1, encoding))
## lines = list(fi)
## self.assertEqual(lines, ["A\n", "B"])
## finally:
## remove_tempfiles(t1)
def test_fileno(self):
t1 = t2 = None
try:
t1 = writeTmp(1, ["A\nB"])
t2 = writeTmp(2, ["C\nD"])
fi = FileInput(files=(t1, t2))
self.assertEqual(fi.fileno(), -1)
            line = next(fi)
self.assertNotEqual(fi.fileno(), -1)
fi.nextfile()
self.assertEqual(fi.fileno(), -1)
line = list(fi)
self.assertEqual(fi.fileno(), -1)
finally:
remove_tempfiles(t1, t2)
def test_opening_mode(self):
try:
# invalid mode, should raise ValueError
fi = FileInput(mode="w")
self.fail("FileInput should reject invalid mode argument")
except ValueError:
pass
t1 = None
try:
# try opening in universal newline mode
t1 = writeTmp(1, [b"A\nB\r\nC\rD"], mode="wb")
fi = FileInput(files=t1, mode="U")
lines = list(fi)
self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
finally:
remove_tempfiles(t1)
def test_file_opening_hook(self):
try:
# cannot use openhook and inplace mode
fi = FileInput(inplace=1, openhook=lambda f, m: None)
self.fail("FileInput should raise if both inplace "
"and openhook arguments are given")
except ValueError:
pass
try:
fi = FileInput(openhook=1)
self.fail("FileInput should check openhook for being callable")
except ValueError:
pass
# XXX The rot13 codec was removed.
# So this test needs to be changed to use something else.
# (Or perhaps the API needs to change so we can just pass
# an encoding rather than using a hook?)
## try:
## t1 = writeTmp(1, ["A\nB"], mode="wb")
##
|
Chilledheart/chromium
|
tools/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent.py
|
Python
|
bsd-3-clause
| 3,194
| 0.007514
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import stat
import tempfile
from telemetry.internal.platform import tracing_agent
from telemetry.internal.platform.tracing_agent import (
chrome_devtools_tracing_backend)
_DESKTOP_OS_NAMES = ['linux', 'mac', 'win']
# The trace config file path should be the same as specified in
# src/components/tracing/startup_tracing.cc
_CHROME_TRACE_CONFIG_DIR_ANDROID = '/data/local/'
_CHROME_TRACE_CONFIG_FILE_NAME = 'chrome-trace-config.json'
class ChromeTracingAgent(tracing_agent.TracingAgent):
def __init__(self, platform_backend):
super(ChromeTracingAgent, self).__init__(platform_backend)
self._chrome_devtools_tracing_backend = (
chrome_devtools_tracing_backend.ChromeDevtoolsTracingBackend(
platform_backend))
    self._trace_config_file = None
@property
def trace_config_file(self):
return self._trace_config_file
@classmethod
def RegisterDevToolsClient(cls, devtools_client_backend, platform_backend):
(chrome_devtools_tracing_backend.ChromeDevtoolsTracingBackend
.RegisterDevToolsClient(devtools_client_backend, platform_backend))
@classmethod
def IsSupported(cls, platform_backend):
return (chrome_devtools_tracing_backend.ChromeDevtoolsTracingBackend
.IsSupported(platform_backend))
def Start(self, trace_options, category_filter, timeout):
return self._chrome_devtools_tracing_backend.Start(
trace_options, category_filter, timeout)
def Stop(self, trace_data_builder):
self._chrome_devtools_tracing_backend.Stop(trace_data_builder)
def _CreateTraceConfigFile(self, config):
assert not self._trace_config_file
if self._platform_backend.GetOSName() == 'android':
self._trace_config_file = os.path.join(_CHROME_TRACE_CONFIG_DIR_ANDROID,
_CHROME_TRACE_CONFIG_FILE_NAME)
self._platform_backend.device.WriteFile(self._trace_config_file,
config.GetTraceConfigJsonString(), as_root=True)
elif self._platform_backend.GetOSName() in _DESKTOP_OS_NAMES:
self._trace_config_file = os.path.join(tempfile.mkdtemp(),
_CHROME_TRACE_CONFIG_FILE_NAME)
with open(self._trace_config_file, 'w') as f:
f.write(config.GetTraceConfigJsonString())
os.chmod(self._trace_config_file,
os.stat(self._trace_config_file).st_mode | stat.S_IROTH)
else:
raise NotImplementedError
def _RemoveTraceConfigFile(self):
if not self._trace_config_file:
return
if self._platform_backend.GetOSName() == 'android':
self._platform_backend.device.RunShellCommand(
['rm', '-f', self._trace_config_file], check_return=True,
as_root=True)
elif self._platform_backend.GetOSName() in _DESKTOP_OS_NAMES:
if os.path.exists(self._trace_config_file):
os.remove(self._trace_config_file)
shutil.rmtree(os.path.dirname(self._trace_config_file))
else:
raise NotImplementedError
self._trace_config_file = None
|
Orav/kbengine
|
kbe/src/lib/python/Lib/distutils/unixccompiler.py
|
Python
|
lgpl-3.0
| 13,419
| 0.00149
|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -lllib
|
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
  * link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
import os, sys, re
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["cc"],
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
executables['ranlib'] = ["ranlib"]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
fixed_args = self._fix_compile_args(None, macros, include_dirs)
ignore, macros, include_dirs = fixed_args
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
raise CompileError(msg)
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so,
cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
# Not many Unices required ranlib anymore -- SunOS 4.x is, I
# think the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if not isinstance(output_dir, (str, type(None))):
raise TypeError("'output_dir' must be a string or None")
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is nee
|
sixhobbits/CorpusBuilder
|
corpusbuilder.py
|
Python
|
apache-2.0
| 1,608
| 0.00995
|
#
# corpusbuilder.py
# Builds and extends a news corpus stored in SQLite
# Gareth Dwyer, 2016
# # # # # # # # # # # # # #
# standard imports
# third party imports
import newspaper
# local imports
import config
from article import Article
from common import log
from dbhelper import DBHelper
from publisher import Publisher
from newsfetcher import NewsFetcher
class CorpusBuilder:
def __init__(self, dbname):
self.db = DBHelper(dbname, fts5_path=config.fts5_path)
self.dbname = dbname
def first_run(self):
self.db.setup()
        self.add_publishers(config.publishers_file)
def add_publishers(self, csv_file):
with open(csv_file) as f:
for line in f.read().strip().split("\n"):
try:
name, key, url = line.split(",")
                    publisher = Publisher(url.strip(), None, name.strip(), key.strip())
self.db.add_publisher(publisher)
except Exception as e:
log(e)
continue
return True
def process_article(self, a, publisher):
db = DBHelper(self.dbname)
article = Article(publisher.id, a.url, a.title, a.text, a.html)
db.add_article_with_retry(article)
def fetch_all_news(self):
nf = NewsFetcher()
publishers = self.db.get_publishers()
for publisher in publishers:
log("Fetching news from {}".format(publisher.url))
nf.fetch_news(publisher, self)
return True
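# Editor's note: a hedged usage sketch, not part of the original module; the
# database file name is made up.
def _corpusbuilder_usage_sketch():
    builder = CorpusBuilder('news.db')
    builder.first_run()       # create tables and load publishers from the config CSV
    builder.fetch_all_news()  # fetch and store articles for every publisher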
|
agry/NGECore2
|
scripts/mobiles/tatooine/razorback_felspur.py
|
Python
|
lgpl-3.0
| 1,656
| 0.02657
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('desert_razorback_felspur')
mobileTemplate.setLevel(24)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Herbivore Meat")
mobileTemplate.setMeatAmount(65)
mobileTemplate.setHideType("Leathery Hide")
	mobileTemplate.setBoneAmount(40)
mobileTemplate.setBoneType("Animal Bone")
mobileTemplate.setHideAmount(25)
mobileTemplate.setSocialGroup("zucca Boar")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
	templates.add('object/mobile/shared_zucca_boar.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_charge_2')
attacks.add('bm_dampen_pain_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('razorback_felspur', mobileTemplate)
return
|
madmax983/h2o-3
|
h2o-py/tests/testdir_algos/glrm/pyunit_benign_glrm.py
|
Python
|
apache-2.0
| 663
| 0.022624
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
def glrm_benign():
print "Importing benign.csv data..."
benignH2O = h2o.upload_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
benignH2O.describe()
for i in range(8,16,2):
print "H2O GLRM with rank " + str(i) + " decomposition:\n"
glrm_h2o = H2OGeneralizedLowRankEstimator(k=i, init="SVD", recover_svd=True)
glrm_h2o.train(x=benignH2O.names, training_frame=benignH2O)
        glrm_h2o.show()
if __name__ == "__main__":
    pyunit_utils.standalone_test(glrm_benign)
else:
glrm_benign()
|
raymondnijssen/QGIS
|
tests/src/python/test_core_additions.py
|
Python
|
gpl-2.0
| 1,581
| 0.000633
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for core additions
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '15.5.2018'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.testing import unittest, start_app
from qgis.core import metaEnumFromValue, metaEnumFromType, QgsTolerance, QgsMapLayer
import sip
start_app()
class TestCoreAdditions(unittest.TestCase):
def testMetaEnum(self):
me = metaEnumFromValue(QgsTolerance.Pixels)
self.assertIsNotNone(me)
self.assertEqual(me.valueToKey(QgsTolerance.Pixels), 'Pixels')
# if using same variable twice (e.g. me = me2), this seg faults
me2 = metaEnumFromValue(QgsTolerance.Pixels, QgsTolerance)
self.assertIsNotNone(me)
self.assertEqual(me2.valueToKey(QgsTolerance.Pixels), 'Pixels')
# do not raise error
        self.assertIsNone(metaEnumFromValue(1, QgsTolerance, False))
# do not provide an int
|
with self.assertRaises(TypeError):
metaEnumFromValue(1)
# QgsMapLayer.LayerType is not a Q_ENUM
with self.assertRaises(ValueError):
metaEnumFromValue(QgsMapLayer.LayerType)
if __name__ == "__main__":
unittest.main()
|
catapult-project/catapult
|
dashboard/dashboard/services/issue_tracker_service_test.py
|
Python
|
bsd-3-clause
| 11,440
| 0.002535
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six.moves import http_client
import json
import mock
import unittest
from apiclient import errors
from dashboard.common import testing_common
from dashboard.services import issue_tracker_service
@mock.patch('services.issue_tracker_service.discovery.build', mock.MagicMock())
class IssueTrackerServiceTest(testing_common.TestCase):
def testAddBugComment_Basic(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertTrue(service.AddBugComment(12345, 'The comment'))
self.assertEqual(1, service._MakeCommentRequest.call_count)
service._MakeCommentRequest.assert_called_with(
12345, {
'updates': {},
'content': 'The comment'
},
project='chromium',
send_email=True)
def testAddBugComment_Basic_EmptyProject(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertTrue(service.AddBugComment(12345, 'The comment', project=''))
self.assertEqual(1, service._MakeCommentRequest.call_count)
service._MakeCommentRequest.assert_called_with(
12345, {
'updates': {},
'content': 'The comment'
},
project='chromium',
send_email=True)
def testAddBugComment_Basic_ProjectIsNone(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertTrue(service.AddBugComment(12345, 'The comment', project=None))
self.assertEqual(1, service._MakeCommentRequest.call_count)
service._MakeCommentRequest.assert_called_with(
12345, {
'updates': {},
'content': 'The comment'
},
project='chromium',
send_email=True)
def testAddBugComment_WithNoBug_ReturnsFalse(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertFalse(service.AddBugComment(None, 'Some comment'))
self.assertFalse(service.AddBugComment(-1, 'Some comment'))
def testAddBugComment_WithOptionalParameters(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertTrue(
service.AddBugComment(
12345,
'Some other comment',
status='Fixed',
labels=['Foo'],
cc_list=['someone@chromium.org']))
self.assertEqual(1, service._MakeCommentRequest.call_count)
service._MakeCommentRequest.assert_called_with(
12345, {
'updates': {
'status': 'Fixed',
'cc': ['someone@chromium.org'],
'labels': ['Foo'],
},
'content': 'Some other comment'
},
project='chromium',
send_email=True)
def testAddBugComment_MergeBug(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCommentRequest = mock.Mock()
self.assertTrue(service.AddBugComment(12345, 'Dupe', merge_issue=54321))
self.assertEqual(1, service._MakeCommentRequest.call_count)
service._MakeCommentRequest.assert_called_with(
12345, {
'updates': {
'status': 'Duplicate',
'mergedInto': 'chromium:54321',
},
'content': 'Dupe'
},
project='chromium',
send_email=True)
@mock.patch('logging.error')
def testAddBugComment_Error(self, mock_logging_error):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value=None)
self.assertFalse(service.AddBugComment(12345, 'My bug comment'))
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertEqual(1, mock_logging_error.call_count)
  def testNewBug_Success_NewBugReturnsId(self):
|
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value={'id': 333})
response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
bug_id = response['bug_id']
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertEqual(333, bug_id)
def testNewBug_Success_SupportNonChromium(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value={
'id': 333,
'projectId': 'non-chromium'
})
response = service.NewBug(
'Bug title',
'body',
owner='someone@example.com',
project='non-chromium')
bug_id = response['bug_id']
project_id = response['project_id']
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertEqual(333, bug_id)
self.assertEqual('non-chromium', project_id)
def testNewBug_Success_ProjectIsEmpty(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value={
'id': 333,
'projectId': 'chromium'
})
response = service.NewBug(
'Bug title', 'body', owner='someone@example.com', project='')
bug_id = response['bug_id']
project_id = response['project_id']
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertEqual(333, bug_id)
self.assertEqual('chromium', project_id)
def testNewBug_Success_ProjectIsNone(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value={
'id': 333,
'projectId': 'chromium'
})
response = service.NewBug(
'Bug title', 'body', owner='someone@example.com', project=None)
bug_id = response['bug_id']
project_id = response['project_id']
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertEqual(333, bug_id)
self.assertEqual('chromium', project_id)
def testNewBug_Failure_HTTPException(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(
side_effect=http_client.HTTPException('reason'))
response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertIn('error', response)
def testNewBug_Failure_NewBugReturnsError(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._ExecuteRequest = mock.Mock(return_value={})
response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertTrue('error' in response)
def testNewBug_HttpError_NewBugReturnsError(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
error_content = {
'error': {
'message': 'The user does not exist: test@chromium.org',
'code': 404
}
}
service._ExecuteRequest = mock.Mock(
side_effect=errors.HttpError(
mock.Mock(return_value={'status': 404}), json.dumps(error_content)))
response = service.NewBug('Bug title', 'body', owner='someone@chromium.org')
self.assertEqual(1, service._ExecuteRequest.call_count)
self.assertTrue('error' in response)
def testNewBug_UsesExpectedParams(self):
service = issue_tracker_service.IssueTrackerService(mock.MagicMock())
service._MakeCreateRequest = mock.Mock()
service.NewBug(
'Bug title',
'body',
owner='someone@chromium.org',
cc=['somebody@chromium.org', 'nobody@chromium.org'])
service._MakeCreateRequest.assert_called_with(
{
'title': 'Bug title',
'summary': 'Bug title',
'description': 'body',
'labels': []
|
uhuramedia/django-experiments
|
experiments/signals.py
|
Python
|
mit
| 124
| 0.008065
|
from django.dispatch import Signal
user_enrolled = Signal(providing_args=['experiment', 'alternative', 'user', 'session'])
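# Editor's note: a hedged usage sketch, not part of the original module; the
# receiver below is hypothetical.
def _user_enrolled_usage_sketch():
    def on_user_enrolled(sender, experiment, alternative, user, session, **kwargs):
        pass  # react to the enrolment here
    user_enrolled.connect(on_user_enrolled)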
|
eLBati/odoo
|
addons/website_sale/models/sale_order.py
|
Python
|
agpl-3.0
| 10,830
| 0.006833
|
# -*- coding: utf-8 -*-
import random
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.addons.web.http import request
class payment_transaction(orm.Model):
_inherit = 'payment.transaction'
_columns = {
# link with the sale order
'sale_order_id': fields.many2one('sale.order', 'Sale Order'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):
res = dict()
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null'),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null'),
}
def _get_errors(self, cr, uid, order, context=None):
return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
for so in self.browse(cr, uid, ids, context=context):
domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
if line_id:
domain += [('id', '=', line_id)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None):
so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
pricelist=so.pricelist_id.id,
product=product_id,
partner_id=so.partner_id.id,
context=context
)['value']
if line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
values['name'] = line.name
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
values['name'] = product.description_sale or product.name
values['product_id'] = product_id
values['order_id'] = order_id
if values.get('tax_id') != None:
values['tax_id'] = [(6, 0, values['tax_id'])]
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
sol = self.pool.get('sale.order.line')
quantity = 0
for so in self.browse(cr, uid, ids, context=context):
if line_id != False:
line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
if line_ids:
line_id = line_ids[0]
# Create line if no line with product_id can be located
if not line_id:
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, context=context)
line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty != None:
quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
            # Remove zero or negative lines
if quantity <= 0:
sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
else:
# update line
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, line_id, context=context)
values['product_uom_qty'] = quantity
sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
return {'line_id': line_id, 'quantity': quantity}
def _cart_accessories(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
s -= set(l.product_id.id for l in order.order_line)
product_ids = random.sample(s, min(len(s),3))
return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
            type='many2one', relation='res.currency', string='Default pricelist'),
}
def sale_product_domain(self, cr, uid, ids, context=None):
return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
        sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
request.session['sale_order_code_pricelist_id'] = False
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
# check for change of partner_id ie after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
|
bradneuman/BlameOverTime
|
blameDBQuery.py
|
Python
|
mit
| 8,282
| 0.006762
|
# Copyright (c) 2014 Brad Neuman
# Common place to put useful re-usable queries into the blame database
def GetCurrentBlame(cursor):
"return a list of tuples of (filename, author, num_lines) that represents"
"the current count of lines from git blame, excluding merges"
cursor.execute('''
select * from
( select filename,
author,
sum(gained_lines) - sum(lost_lines) as lines
from blames
group by filename,author )
where lines > 0;
''')
return cursor.fetchall()
def GetBlameOverTime(cursor):
"return a list of tuples, in order of commits, with values (sha, author, num_lines)"
sql = '''
select sha, author, sum(gained_lines) - sum(lost_lines)
from blames
group by sha, author
order by ROWID
'''
# build up the cumulative sum as we go
ret = []
currLines = {}
for row in cursor.execute(sql):
sha = row[0]
author = row[1]
lineDelta = row[2]
lastLines = 0
if author in currLines:
lastLines = currLines[author]
newLines = lastLines + lineDelta
ret.append( (sha, author, newLines) )
currLines[author] = newLines
return ret
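# Editor's note: a hedged toy illustration (not part of the original module) of the
# cumulative-sum step performed above; the deltas are made up.
def _cumulative_blame_sketch():
    deltas = [('sha1', 'alice', 10), ('sha2', 'alice', -3), ('sha3', 'bob', 5)]
    totals, out = {}, []
    for sha, author, delta in deltas:
        totals[author] = totals.get(author, 0) + delta
        out.append((sha, author, totals[author]))
    return out  # [('sha1', 'alice', 10), ('sha2', 'alice', 7), ('sha3', 'bob', 5)]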
def GetName(nameMap, name):
"helper to return a valid name"
nameStrip = name.strip()
if nameStrip in nameMap:
return nameMap[nameStrip]
else:
return nameStrip
def GetAllAuthors(cursor, nameMap = []):
"return a list of all authors"
names = [tpl[0] for tpl in cursor.execute('select distinct(author) from full_blames').fetchall()]
nameSet = set()
for name in names:
nameSet.add( GetName(nameMap, name) )
return list(nameSet)
def GetFullBlameOverTime(cursor, exclusions = [], nameMap = {}):
"return the whole damn thing. TODO: dont use fetchall, write it out bit by bit"
"list of (timestamp, repo, sha1, { author: num_lines} )"
# go through each repository in the database, and get the blame log for each one
# maps repository -> topo_order -> (ts, commit, { author -> num_lines} )
repos = {}
sql = 'select distinct(repository) from commits'
data = cursor.execute(sql).fetchall()
for row in data:
repos[row[0]] = {}
# print repos
for repo in repos:
sql = '''
select commits.ts, commits.repository, commits.sha, commits.topo_order, full_blames.author, sum(lines)
from full_blames
inner join commits on full_blames.sha = commits.sha
where commits.repository = (?)
'''
for i in range(len(exclusions)):
sql = sql + " and full_blames.filename not like (?) "
sql = sql + '''
group by full_blames.sha, full_blames.author
order by commits.topo_order
'''
tpl = tuple([repo] + exclusions)
print "querying for '%s'..." % repo
for row in cursor.execute(sql, tpl):
ts = row[0]
sha = row[2]
topoOrder = row[3]
author = GetName(nameMap, row[4])
numLines = row[5]
if topoOrder not in repos[repo]:
repos[repo][topoOrder] = (ts, sha, {})
repos[repo][topoOrder][2][author] = numLines
# print "got %d commits from '%s'" % (len(repos[repo]), repo)
    # now merge the lists. Keep the topological order within each list, but merge based on timestamp
ret = []
repoIdx = {}
for repo in repos:
repoIdx[repo] = min(repos[repo].keys())
print "merging."
# we want each commit entry to have a sum of the work for each author across all repositories. E.g. if the
# commit is from repo B, we want to show the number of lines for the author as everything already done in
# A + what was just updated in B.
# this will keep track of the last entry for each repo, so we can add them up properly.
# repo -> author -> num_lines
currentWork = {}
for repo in repos:
currentWork[repo] = {}
# will remove the repo when we hit the end
while repoIdx:
# print repoIdx
min_times = []
for repo in repoIdx:
topoOrder = repoIdx[repo]
ts = repos[repo][topoOrder][0]
min_times.append( (ts, repo) )
# find min timestamp
min_entry = min(min_times, key=lambda t: t[0])
ts = min_entry[0]
repo = min_entry[1]
# now we are choosing the next entry from repo
topoOrder = repoIdx[repo]
sha = repos[repo][topoOrder][1]
|
commitWork = repos[repo][topoOrder][2]
for author in commitWork:
# update the currentWork for this repo
currentWork[repo][author] = commitWork[author]
# now create the return data by summing up the current work
sumWork = {}
for sumRepo in currentWork:
for author in currentWork[sumRepo]:
ac = 0
if author in sumWork:
ac = sumWork[author]
|
sumWork[author] = ac + currentWork[sumRepo][author]
ret.append( (ts, repo, sha, sumWork) )
# increment index, and delete it if its not there anymore
repoIdx[repo] += 1
if repoIdx[repo] not in repos[repo]:
# print "finished merging %s" % repo
del repoIdx[repo]
return ret
def GetLatestRevision(cursor, repository):
"return a tuple of (sha, topo_order) for the latest entry in commits for the given repo"
"return None if there are no entreis for the repo"
sql = 'select sha, max(topo_order) from commits where repository = (?)'
return cursor.execute(sql, (repository,)).fetchone()
def GetLatestFullBlames(cursor, repository):
"return a dictionary of filename->author->lines from the last commit in full_blames"
# first, select the highest topo_order commit for that repo
row = GetLatestRevision(cursor, repository)
if row is None or row[0] is None:
return {}
lastRev = row[0]
sql = 'select filename, author, lines from full_blames where sha = (?)'
stats = {}
for row in cursor.execute(sql, (lastRev,)):
filename = row[0]
author = row[1]
lines = row[2]
if author == '':
continue
if filename not in stats:
stats[filename] = {}
stats[filename][author] = lines
return stats
def PrintDiffSpikes(cursor, exclusions = [], nameMap = {}, num = 10):
"Looks over the full blame and returns large spikes which could be due to files that might want to be exluded"
blames = GetFullBlameOverTime( cursor, exclusions, nameMap )
# first sort by author
# map of author -> list of (repo, sha1, num_lines)
blamesByAuthor = {}
for line in blames:
repo = line[1]
sha = line[2]
authorLines = line[3]
for author in authorLines:
if author not in blamesByAuthor:
blamesByAuthor[author] = []
blamesByAuthor[author].append( (repo, sha, authorLines[author]) )
# diff for each author from the previous number of lines
# list of tuples of (author, lineDiff, repo, sha)
diffs = []
for author in blamesByAuthor:
last = 0
for blame in blamesByAuthor[author]:
diff = blame[2] - last
last = blame[2]
diffs.append( (author, diff, blame[0], blame[1]) )
print "computed %d diffs" % len(diffs)
diffs.sort( lambda lhs, rhs : -1 if lhs[1] > rhs[1] else 1 )
print "top %d diff spikes:" % num
for d in diffs[:num]:
print d
def PrintLargeFiles(cursor, exclusions = [], num = 10):
"looks for files that may be causing large spikes in the diff lines"
sql = '''
select filename, repository, MAX(lines) as max_lines
from full_blames
where 1
'''
for i in range(len(exclusions)):
sql = sql + " and filename not like (?) "
sql += '''
group by filename, repository
order by max_lines DESC
limit %d''' % num
print "querying for %d largest files..." % num
tpl = tuple(exclusions)
for row in cursor.execute(sql, tpl):
filename = row[0]
|
pli3/Openwebif
|
plugin/controllers/views/web/sleeptimer.py
|
Python
|
gpl-2.0
| 5,627
| 0.01244
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.582168
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/sleeptimer.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class sleeptimer(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(sleeptimer, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_51193055 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2sleeptimer>
\t<e2enabled>''')
_v = VFFSL(SL,"enabled",True) # u'$enabled' on line 4, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$enabled')) # from line 4, col 13.
write(u'''</e2enabled>
\t<e2minutes>''')
_v = VFFSL(SL,"minutes",True) # u'$minutes' on line 5, col 13
if _v is not None: write(_filter(_v, rawExpr=u'$minutes')) # from line 5, col 13.
write(u'''</e2minutes>
\t<e2action>''')
_v = VFFSL(SL,"action",True) # u'$action' on line 6, col 12
if _v is not None: write(_filter(_v, rawExpr=u'$action')) # from line 6, col 12.
write(u'''</e2action>
\t<e2text>''')
_v = VFFSL(SL,"message",True) # u'$message' on line 7, col 10
if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 7, col 10.
write(u'''</e2text>
</e2sleeptimer>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_51193055
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_sleeptimer= 'respond'
## END CLASS DEFINITION
if not hasattr(sleeptimer, '_initCheetahAttributes'):
templateAPIClass = getattr(sleeptimer, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(sleeptimer)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=sleeptimer()).run()
|
mvaled/sentry
|
src/sentry/south_migrations/0425_auto__add_index_pullrequest_organization_id_merge_commit_sha.py
|
Python
|
bsd-3-clause
| 107,666
| 0.008053
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from sentry.utils.db import is_postgres
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = False
def forwards(self, orm):
# Adding index on 'PullRequest', fields ['organization_id', 'merge_commit_sha']
if is_postgres():
db.commit_transaction()
db.execute(
"CREATE INDEX CONCURRENTLY {} ON sentry_pull_request (organization_id, merge_commit_sha)".
format(
db.create_index_name(
'sentry_pull_request',
['organization_id', 'merge_commit_sha']
),
)
)
db.start_transaction()
else:
db.create_index('sentry_pull_request', ['organization_id', 'merge_commit_sha'])
def backwards(self, orm):
# Removing index on 'PullRequest', fields ['organization_id', 'merge_commit_sha']
db.delete_index('sentry_pull_request', ['organization_id', 'merge_commit_sha'])
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
            'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'client_id': ('django.db.models.fields.CharField', [], {'default': "'b5e160fcd8514738a779f4479d08838e12ad800c2f584169bc6952846c658eb4'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'aa93f3afd028426d8faacc03cf4c1286c3e7629ac0d5492ab6035186a3f895f5'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Set Swift'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'e680f7dbdf0441ec94822af839eb0136'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 6, 20, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 7, 20, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'26625e8253354902a12d004625c34f9170ee287efea6428dbd57a633596297d2'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'a564882fa0844eda9b5c02a06f245f05b67a55efe9734f5492eceaae817caf70'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}
|
iledarn/addons-yelizariev
|
res_users_signature/__openerp__.py
|
Python
|
lgpl-3.0
| 445
| 0.01573
|
{
|
'name' : 'Signature templates for user emails',
'version' : '1.0.0',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'LGPL-3',
'category' : 'Social Network',
'website' : 'https://yelizariev.github.io',
'depends' : ['base'],
'data':[
'res_users_signature_views.xml',
'security/res_users_signature_security.xml',
'security/ir.model.access.csv',
],
    'installable': False
}
|
takluyver/readthedocs.org
|
readthedocs/restapi/utils.py
|
Python
|
mit
| 4,957
| 0.001412
|
import hashlib
import logging
import requests
from builds.models import Version
from projects.utils import slugify_uniquely
from search.indexes import PageIndex, ProjectIndex, SectionIndex
from betterversion.better import version_windows, BetterVersion
log = logging.getLogger(__name__)
def sync_versions(project, versions, type):
"""
Update the database with the current versions from the repository.
"""
# Bookkeeping for keeping tag/branch identifies correct
verbose_names = [v['verbose_name'] for v in versions]
    project.versions.filter(verbose_name__in=verbose_names).update(type=type)
old_versions = {}
old_version_values = project.versions.values('identifier', 'verbose_name')
    for version in old_version_values:
old_versions[version['verbose_name']] = version['identifier']
added = set()
# Add new versions
for version in versions:
version_id = version['identifier']
version_name = version['verbose_name']
if version_name in old_versions.keys():
if version_id == old_versions[version_name]:
# Version is correct
continue
else:
# Update slug with new identifier
Version.objects.filter(
project=project, verbose_name=version_name
).update(
identifier=version_id,
type=type,
machine=False,
)
log.info("(Sync Versions) Updated Version: [%s=%s] " % (version['verbose_name'], version['identifier']))
else:
# New Version
slug = slugify_uniquely(Version, version['verbose_name'], 'slug', 255, project=project)
Version.objects.create(
project=project,
slug=slug,
type=type,
identifier=version['identifier'],
verbose_name=version['verbose_name'],
)
added.add(slug)
if added:
log.info("(Sync Versions) Added Versions: [%s] " % ' '.join(added))
return added
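# Editor's note: a hedged illustration (not part of the original module) of the
# input shape sync_versions() expects; identifiers, names and the 'tag' type value
# are made up for the sketch.
def _sync_versions_usage_sketch(project):
    versions = [
        {'identifier': 'deadbeef', 'verbose_name': 'v1.0'},
        {'identifier': 'cafef00d', 'verbose_name': 'master'},
    ]
    return sync_versions(project, versions, type='tag')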
def delete_versions(project, version_data):
"""
Delete all versions not in the current repo.
"""
current_versions = []
if 'tags' in version_data:
for version in version_data['tags']:
current_versions.append(version['identifier'])
if 'branches' in version_data:
for version in version_data['branches']:
current_versions.append(version['identifier'])
to_delete_qs = project.versions.exclude(
identifier__in=current_versions).exclude(
uploaded=True).exclude(
active=True).exclude(
slug='latest')
if to_delete_qs.count():
ret_val = {obj.slug for obj in to_delete_qs}
log.info("(Sync Versions) Deleted Versions: [%s]" % ' '.join(ret_val))
to_delete_qs.delete()
return ret_val
else:
return set()
def index_search_request(version, page_list, commit):
log_msg = ' '.join([page['path'] for page in page_list])
log.info("(Server Search) Indexing Pages: %s [%s]" % (
version.project.slug, log_msg))
project = version.project
page_obj = PageIndex()
project_scale = 1
#tags = [tag.name for tag in project.tags.all()]
project_obj = ProjectIndex()
project_obj.index_document(data={
'id': project.pk,
'name': project.name,
'slug': project.slug,
'description': project.description,
'lang': project.language,
'author': [user.username for user in project.users.all()],
'url': project.get_absolute_url(),
'tags': None,
'_boost': project_scale,
})
index_list = []
for page in page_list:
log.debug("(API Index) %s:%s" % (project.slug, page['path']))
page_scale = 1
page_id = hashlib.md5('%s-%s-%s' % (project.slug, version.slug, page['path'])).hexdigest()
index_list.append({
'id': page_id,
'project': project.slug,
'version': version.slug,
'path': page['path'],
'title': page['title'],
'headers': page['headers'],
'content': page['content'],
'taxonomy': None,
'commit': commit,
'_boost': page_scale + project_scale,
})
page_obj.bulk_index(index_list, parent=project.slug)
log.info("(Server Search) Deleting files not in commit: %s" % commit)
# Figure this out later
delete_query = {
# ES .90 doesn't wrap this
#"query": {
"bool": {
"must": [
{"term": {"project": project.slug, }},
{"term": {"version": version.slug, }},
],
"must_not": {
"term": {
"commit": commit
}
}
}
#}
}
page_obj.delete_document(body=delete_query)
|
Locu/chronology
|
metis/metis/core/execute/spark/executor.py
|
Python
|
mit
| 8,583
| 0.009321
|
# XXX(usmanm): PySpark is sensitive to modifying Python objects in functions
# like `map`. Please be wary of that! Using deepcopy everywhere will always
# work, but obviously is going to slow things down.
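# Editor's note: a hedged illustration (not part of the original module) of the
# copy-before-mutate pattern the comment above recommends for map-style functions.
def _merge_without_mutation_sketch(event1, event2):
    from copy import deepcopy
    merged = deepcopy(event1)  # never modify the input event in place
    merged.update(event2)
    return merged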
import json
import os
import re
import sys
import tempfile
import types
import zipfile
from copy import deepcopy
from metis import app
from metis.core.execute.base import Executor
from metis.core.execute.utils import generate_filter
from metis.core.execute.utils import get_properties_accessed_by_value
from metis.core.execute.utils import get_value
from metis.core.query.condition import Condition
IGNORE_FILES_RE = re.compile('^.*\.pyc$', re.I)
def _copy_lib_for_spark_workers(file_path):
zip_file = zipfile.ZipFile(file_path, 'w')
# TODO(usmanm): Zip only the minimum set of files needed.
for root, dirs, files in os.walk(app.config['PATH'], followlinks=True):
for file_name in files:
# Don't copy .pyc files to the lib file.
if IGNORE_FILES_RE.match(file_name):
continue
zip_file.write(os.path.join(root, file_name),
os.path.join(root.replace(app.config['PATH'], 'metis'),
file_name))
zip_file.close()
def _setup_pyspark():
# Set SPARK_HOME environment variable.
os.putenv('SPARK_HOME', app.config['SPARK_HOME'])
# From Python docs: Calling putenv() directly does not change os.environ, so
# it's better to modify os.environ. Also some platforms don't support
# os.putenv. We'll just do both.
os.environ['SPARK_HOME'] = app.config['SPARK_HOME']
# Add PySpark to path.
sys.path.append(os.path.join(app.config['SPARK_HOME'], 'python'))
class SparkExecutor(Executor):
def __init__(self):
# Setup PySpark. This is needed until PySpark becomes available on PyPI,
# after which we can simply add it to requirements.txt.
_setup_pyspark()
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.serializers import MarshalSerializer
# Create a temporary .zip lib file for Metis, which will be copied over to
# Spark workers so they can unpickle Metis functions and objects.
metis_lib_file = tempfile.NamedTemporaryFile(suffix='.zip', delete=False)
metis_lib_file.close()
_copy_lib_for_spark_workers(metis_lib_file.name)
# Also ship the Metis lib file so worker nodes can deserialize Metis
# internal data structures.
conf = SparkConf()
conf.setMaster(app.config['SPARK_MASTER'])
conf.setAppName('chronology:metis')
parallelism = int(app.config.get('SPARK_PARALLELISM', 0))
if parallelism:
conf.set('spark.default.parallelism', parallelism)
self.context = SparkContext(conf=conf,
pyFiles=[metis_lib_file.name],
serializer=MarshalSerializer())
# Delete temporary Metis lib file.
os.unlink(metis_lib_file.name)
# We'll use this to parallelize fetching events in KronosSource.
# The default of 8 is from:
# https://spark.apache.org/docs/latest/configuration.html
self.parallelism = parallelism or 8
def __getstate__(self):
# Don't pickle the `SparkContext` object.
state = self.__dict__.copy()
del state['context']
return state
def finalize(self, rdd):
return rdd.collect()
def execute_aggregate(self, node):
def finalize(event):
# `event` is of the form (key, event).
return node.finalize_func(event[1])
return (self.execute(node.source)
.map(node.group_func)
.reduceByKey(node.reduce_func)
.map(finalize))
def execute_filter(self, node):
return self.execute(node.source).filter(generate_filter(node.condition))
def execute_join(self, node):
left_alias = node.left.alias or 'left'
right_alias = node.right.alias or 'right'
def merge(events):
event1, event2 = events
if isinstance(event1, types.StringType):
# Join case: events = (key, (event1, event2))
event1, event2 = event2
event = deepcopy(event1)
event.update(event2)
else:
# Cartesian case: events = (event1, event2)
event = {}
for key, value in event1.iteritems():
event['%s.%s' % (left_alias, key)] = value
for key, value in event2.iteritems():
event['%s.%s' % (right_alias, key)] = value
return event
def get_equijoin_key_values(condition):
# condition must be a *leaf* condition.
if getattr(condition, 'op', None) != Condition.Op.EQ:
return None
# Get properties being accessed by left and right side of the
# conditional.
left_properties = get_properties_accessed_by_value(condition.left)
right_properties = get_properties_accessed_by_value(condition.right)
if not (left_properties and right_properties):
return None
# Only return getters if both sides of the conditional read from different
      # sources. You can't use this optimization if, say, the condition is
# (left.x + right.y = 10)
# XXX: This isn't kosher for non-deterministic functions.
if (all(p.startswith('%s.' % left_alias) for p in left_properties) and
all(p.startswith('%s.' % right_alias) for p in right_properties)):
return {'left': condition.left, 'right': condition.right}
if (all(p.startswith('%s.' % right_alias) for p in left_properties) and
all(p.startswith('%s.' % left_alias) for p in right_properties)):
return {'left': condition.right, 'right': condition.left}
return None
def map_equijoin(alias, key_values):
def map(event):
new_event = {}
for key, value in event.iteritems():
new_event['%s.%s' % (alias, key)] = value
key = json.dumps([get_value(new_event, value) for value in key_values])
return (key, new_event)
return map
def setup_join():
eq_join_key_values = []
# TODO(usmanm): Right now we only optimize if the conditional is an EQ or
      # if it's an AND and has some EQ in the top level. We don't do any
# recursive searching in condition trees. Improve that.
condition = node.condition
_type = getattr(condition, 'type', None)
if _type == Condition.Type.AND:
filter_conditions = []
for c in condition.conditions:
values = get_equijoin_key_values(c)
if values:
eq_join_key_values.append(values)
else:
filter_conditions.append(c)
if filter_conditions:
condition.conditions = filter_conditions
else:
condition = None
elif _type != Condition.Type.OR: # Ignore ORs for now.
value = get_equijoin_key_values(condition)
if value:
eq_join_key_values.append(value)
condition = None
return eq_join_key_values, (generate_filter(condition)
if condition else None)
eq_join_key_values, filter_function = setup_join()
if eq_join_key_values:
mapped_left = (self.execute(node.left)
.map(map_equijoin(
left_alias,
[value['left'] for value in eq_join_key_values])))
mapped_right = (self.execute(node.right)
.map(map_equijoin(
right_alias,
[value['right'] for value in eq_join_key_values])))
joined = mapped_left.join(mapped_right).map(merge)
else:
# Naive O(n^2) cartesian product.
joined = (self.execute(node.left).cartesian(self.execute(node.right))
.map(merge))
if filter_function:
joined = joined.filter(filter_function)
return joined
def execute_limit(self, node):
# TODO(usmanm): Is there a better way than to collect and redistribute all
# events?
return self.context.parallelize(self.execute(node.source).take(node.limit))
def execute_order_by(self, node):
return (self.execute(node.source)
.keyBy(lambda e: tuple(get_value(e, field)
for field in node.fields))
.sortByKey(asc
|
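The equi-join path above (get_equijoin_key_values plus map_equijoin) reduces a join to keying both sides by the JSON-serialized values of their join properties, so that Spark's join brings matching events together. A minimal plain-Python sketch of that keying idea, with property access simplified to a dict lookup instead of metis' get_value and with made-up event fields:
import json
def key_by(alias, properties):
    def _map(event):
        # prefix every field with the source alias, as map_equijoin does
        prefixed = {'%s.%s' % (alias, key): value for key, value in event.items()}
        join_key = json.dumps([prefixed['%s.%s' % (alias, p)] for p in properties])
        return (join_key, prefixed)
    return _map
left = key_by('left', ['user'])({'user': 42, 'action': 'click'})
right = key_by('right', ['user'])({'user': 42, 'page': '/home'})
assert left[0] == right[0]  # identical join keys, so these two events would be joined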
harshilasu/LinkurApp
|
y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/snapshots/__init__.py
|
Python
|
gpl-3.0
| 353
| 0.002833
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Commands for reading and manipulating snapshots."""
from googlecloudsdk.calliope import base
class Snapshots(base.Group):
"""List, describe, and delete Google Compute Engine snapshots."""
Snapshots.detailed_help = {
    'brief': 'List, describe, and delete Google Compute Engine snapshots',
}
|
mycodeday/crm-platform
|
website_twitter/controllers/main.py
|
Python
|
gpl-3.0
| 1,936
| 0.006198
|
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.translate import _
import json
class Twitter(http.Controller):
@http.route(['/twitter_reload'], type='json', auth="user", website=True)
def twitter_reload(self):
return request.website.fetch_favorite_tweets()
@http.route(['/get_favorites'], type='json', auth="public", website=True)
    def get_tweets(self, limit=20):
key = request.website.twitter_api_key
        secret = request.website.twitter_api_secret
screen_name = request.website.twitter_screen_name
cr, uid = request.cr, request.uid
debug = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
if not key or not secret:
if debug:
return {"error": _("Please set the Twitter API Key and Secret in the Website Settings.")}
return []
if not screen_name:
if debug:
return {"error": _("Please set a Twitter screen name to load favorites from, "
"in the Website Settings (it does not have to be yours)")}
return []
twitter_tweets = request.registry['website.twitter.tweet']
tweets = twitter_tweets.search_read(
cr, uid,
[('website_id','=', request.website.id),
('screen_name','=', screen_name)],
['tweet'], limit=int(limit), order="tweet_id desc", context=request.context)
if len(tweets) < 12:
if debug:
return {"error": _("Twitter user @%(username)s has less than 12 favorite tweets. "
"Please add more or choose a different screen name.") % \
{'username': screen_name}}
else:
return []
return [json.loads(tweet['tweet']) for tweet in tweets]
|
jakobj/binary_network
|
meanfield.py
|
Python
|
bsd-2-clause
| 6,477
| 0.000772
|
# global imports
import numpy as np
import scipy.special as scsp
import scipy.optimize as scop
# local imports
import helper as bhlp
"""
ref Helias14:
Helias, Tetzlaff, Diesmann (2014) The Correlation Structure of
Local Neuronal Networks Intrinsically Results from Recurrent Dynamics
PLoS Comput Biol 10(1): e1003428
DOI: 10.1371/journal.pcbi.1003428
"""
class BinaryMeanfield(object):
"""
this module allows one to calculate the stationary firing rate and
average correlations in a network of binary neurons with one
excitatory and one inhibitory population from connectivity
statistics
"""
def __init__(self, epsilon, N, gamma, g, w, b, K=None):
"""
epsilon: connectivity
N: total number of neurons
gamma: relative size of the excitatory population
g: relative weight of inhibitory connections
w: weight of excitatory connections
b: biases (2d vector), corresponds to -1. * threshold
K: indegree, can be provided as alternative to connectivity
"""
if epsilon is not None:
assert(K is None), 'Please provide connectivity OR indegree.'
elif epsilon is None:
assert(K is not None), 'Please provide connectivity OR indegree.'
self.NE = int(gamma * N)
self.NI = int(N - self.NE)
if epsilon is not None:
KE = int(epsilon * self.NE)
KI = int(epsilon * self.NI)
else:
KE = int(gamma * K)
KI = int(K - KE)
self.K = np.array([[KE, KI],
[KE, KI]])
self.J = np.array([[w, -g * w],
[w, -g * w]])
self.b = np.array(b)
self.C = np.array([[0., 0.],
[0., 0.]])
self.mu = np.array([0., 0.])
def get_mu_meanfield(self, mu0, C=None):
"""
Self-consistent rate
Formula (7) in Helias14
mu0: average rates
C: average correlations
"""
if C is None:
C = np.array([[0., 0.],
[0., 0.]])
def f(mu):
h_mu = self.get_mu_input(mu)
h_sigma = self.get_sigma_input(mu, C)
return mu - 0.5 * scsp.erfc((-self.b - h_mu) / (np.sqrt(2.) * h_sigma))
return scop.fsolve(f, mu0)
def get_mu_input(self, mu):
"""
Mean input given presynaptic activity mu
Formula (4) in Helias14
mu: average rates
"""
mu = np.array(mu)
if np.shape(mu) != (2,):
raise ValueError(
'Mean activity needs to be given for both populations.')
return np.dot(self.K * self.J, mu)
def get_sigma_input(self, mu, C=None):
"""
Standard deviation of input given presynaptic activity mu
(and correlations C)
For C=None: formula (6) in Helias14
For C given: formula (13) in Helias14
        mu: average rates
C: average correlations
"""
mu = np.array(mu)
if np.shape(mu) != (2,):
raise ValueError(
'Mean activity needs to be given for both populations.')
if C is None:
C = np.array([[0., 0.],
[0., 0.]])
else:
C = np.array(C)
if np.shape(C) != (2, 2):
raise ValueError(
'Correlation needs to be given for all combinations of both populations.')
a = bhlp.get_sigma2(mu)
sigma_shared = np.dot(self.K * self.J * self.J, a)
sigma_corr = np.diag(
np.dot(np.dot(self.K * self.J, C), (self.K * self.J).T))
return np.sqrt(sigma_shared + sigma_corr)
def get_suszeptibility(self, mu, sigma):
"""
Suszeptibility (i.e., derivative of Gain function) for Gaussian
input distribution
Formula (8) in Helias14
mu: mean of input
sigma: std of input
"""
return 1. / (np.sqrt(2. * np.pi) * sigma) * np.exp(-1. * (mu + self.b) ** 2 / (2. * sigma ** 2))
def get_w_meanfield(self, mu, C=None):
"""
Linearized population averaged weights
Formula (10) in Helias14
mu: average rates
"""
h_mu = self.get_mu_input(mu)
h_sigma = self.get_sigma_input(mu, C)
return ((self.K * self.J).T * self.get_suszeptibility(h_mu, h_sigma)).T
def get_c_meanfield(self, mu, C=None):
"""
Self-consistent correlations
Formula (24) without external input in Helias14
mu: average rates
"""
a = bhlp.get_sigma2(mu)
A = np.zeros(2)
A[0] = a[0] * 1. / self.NE if self.NE > 0 else 0.
A[1] = a[1] * 1. / self.NI if self.NI > 0 else 0.
W = self.get_w_meanfield(mu, C)
M = np.array([[2. - 2. * W[0, 0], -2. * W[0, 1], 0.],
[-1. * W[1, 0], 2. - (W[0, 0] + W[1, 1]), -1. * W[0, 1]],
[0, -2. * W[1, 0], 2. - 2. * W[1, 1]]])
B = np.array([[2. * W[0, 0], 0],
[W[1, 0], W[0, 1]],
[0, 2. * W[1, 1]]])
rhs = np.dot(B, A)
c = np.linalg.solve(M, rhs)
C = np.array([[c[0], c[1]],
[c[1], c[2]]])
return C
def get_m_c_iter(self, mu0):
"""Calculate mean activity and mean correlations in a recurrent
network iteratively, using the improved meanfield approach from
Helias14
mu0: initial guess for average rates
"""
if np.shape(mu0) != (2,):
raise ValueError('Initial guess for mean activity needs to be given for both populations.')
Dmu = 1e10
Dc = 1e10
mu = mu0
C = self.C
while Dmu > 1e-15 and Dc > 1e-15:
mu_old = np.sum(mu)
c_old = np.sum(C)
mu = self.get_mu_meanfield(mu, C)
C = self.get_c_meanfield(mu, C)
Dmu = abs(np.sum(mu) - mu_old)
Dc = abs(np.sum(C) - c_old)
self.mu = mu
self.C = C
return mu, C
def get_m(self, mu0):
"""Calculate mean activity in a recurrent
network using meanfield approach
mu0: initial guess for average rates
"""
if np.shape(mu0) != (2,):
raise ValueError('Initial guess for mean activity needs to be given for both populations.')
mu = mu0
mu = self.get_mu_meanfield(mu)
return mu
|
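The fixed point solved in get_mu_meanfield above is the self-consistent rate m = \frac{1}{2}\,\mathrm{erfc}\!\left(\frac{-b - \mu_{\mathrm{in}}}{\sqrt{2}\,\sigma_{\mathrm{in}}}\right) (formula (7) in Helias14), with \mu_{\mathrm{in}} and \sigma_{\mathrm{in}} supplied by get_mu_input and get_sigma_input. A scalar toy version of that fixed-point problem; the parameter values are illustrative and the binary variance m(1 - m) is an assumption standing in for bhlp.get_sigma2:
import numpy as np
import scipy.special as scsp
import scipy.optimize as scop
K, J, b = 100, 0.1, -5.0  # indegree, weight, bias (toy values, not from the paper)
def f(m):
    mu_in = K * J * m                                      # mean input, cf. get_mu_input
    sigma_in = np.sqrt(K * J ** 2 * m * (1. - m) + 1e-12)  # input std, assuming variance m(1 - m)
    return m - 0.5 * scsp.erfc((-b - mu_in) / (np.sqrt(2.) * sigma_in))
m_star = scop.fsolve(f, 0.45)[0]
print("self-consistent rate:", m_star)  # with these toy values the active fixed point is m = 0.5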
benjaminrigaud/django
|
tests/urlpatterns_reverse/tests.py
|
Python
|
bsd-3-clause
| 37,955
| 0.003478
|
# -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import unittest
import warnings
from django.contrib.auth.models import User
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, reverse_lazy, resolve, get_callable,
get_resolver, NoReverseMatch, Resolver404, ResolverMatch, RegexURLResolver,
RegexURLPattern)
from django.http import HttpRequest, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from admin_scripts.tests import AdminScriptTestCase
from . import urlconf_outer, middleware, views
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', 'view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', 'inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', 'inc-view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views should have None as the url_name. Regression data for #21157.
('/unnamed/normal/42/37/', None, None, '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', None, None, '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'
|
peguin40/zulip
|
zerver/lib/push_notifications.py
|
Python
|
apache-2.0
| 10,547
| 0.005404
|
from __future__ import absolute_import
import random
from six import text_type
from typing import Any, Dict, Optional, SupportsInt
from zerver.models import PushDeviceToken, UserProfile
from zerver.models import get_user_profile_by_id
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.decorator import statsd_increment
from zerver.lib.utils import generate_random_token
from zerver.lib.redis_utils import get_redis_client
from apns import APNs, Frame, Payload, SENT_BUFFER_QTY
import gcmclient
from django.conf import settings
import base64, binascii, logging, os, time
from functools import partial
# APNS error codes
ERROR_CODES = {
1: 'Processing error',
2: 'Missing device token', # looks like token was empty?
    3: 'Missing topic', # topic is encoded in the certificate, looks like certificate is wrong. bail out.
4: 'Missing payload', # bail out, our message looks like empty
5: 'Invalid token size', # current token has wrong size, skip it and retry
6: 'Invalid topic size', # can not happen, we do not send topic, it is part of certificate. bail out.
7: 'Invalid payload size', # our payload is probably too big. bail out.
    8: 'Invalid token', # our device token is broken, skip it and retry
10: 'Shutdown', # server went into maintenance mode. reported token is the last success, skip it and retry.
None: 'Unknown', # unknown error, for sure we try again, but user should limit number of retries
}
redis_client = get_redis_client()
# Maintain a long-lived Session object to avoid having to re-SSL-handshake
# for each request
connection = None
# We maintain an additional APNS connection for pushing to Zulip apps that have been signed
# by the Dropbox certs (and have an app id of com.dropbox.zulip)
dbx_connection = None
# `APNS_SANDBOX` should be a bool
assert isinstance(settings.APNS_SANDBOX, bool)
def get_apns_key(identifer):
# type: (SupportsInt) -> str
return 'apns:' + str(identifer)
class APNsMessage(object):
def __init__(self, user, tokens, alert=None, badge=None, sound=None,
category=None, **kwargs):
# type: (UserProfile, List[text_type], text_type, int, text_type, text_type, **Any) -> None
self.frame = Frame()
self.tokens = tokens
expiry = int(time.time() + 24 * 3600)
priority = 10
payload = Payload(alert=alert, badge=badge, sound=sound,
category=category, custom=kwargs)
for token in tokens:
data = {'token': token, 'user_id': user.id}
identifier = random.getrandbits(32)
key = get_apns_key(identifier)
redis_client.hmset(key, data)
redis_client.expire(key, expiry)
self.frame.add_item(token, payload, identifier, expiry, priority)
def get_frame(self):
# type: () -> Frame
return self.frame
def response_listener(error_response):
# type: (Dict[str, SupportsInt]) -> None
identifier = error_response['identifier']
key = get_apns_key(identifier)
if not redis_client.exists(key):
        logging.warn("APNs key, {}, doesn't exist.".format(key))
return
code = error_response['status']
assert isinstance(code, int)
errmsg = ERROR_CODES[code]
data = redis_client.hgetall(key)
token = data['token']
user = get_user_profile_by_id(int(data['user_id']))
b64_token = hex_to_b64(token)
logging.warn("APNS: Failed to deliver APNS notification to %s, reason: %s" % (b64_token, errmsg))
if code == 8:
# Invalid Token, remove from our database
logging.warn("APNS: Removing token from database due to above failure")
try:
PushDeviceToken.objects.get(user=user, token=b64_token).delete()
except PushDeviceToken.DoesNotExist:
pass
def get_connection(cert_file, key_file):
# type: (str, str) -> APNs
connection = APNs(use_sandbox=settings.APNS_SANDBOX,
cert_file=cert_file,
key_file=key_file,
enhanced=True)
connection.gateway_server.register_response_listener(response_listener)
return connection
if settings.APNS_CERT_FILE is not None and os.path.exists(settings.APNS_CERT_FILE):
connection = get_connection(settings.APNS_CERT_FILE,
settings.APNS_KEY_FILE)
if settings.DBX_APNS_CERT_FILE is not None and os.path.exists(settings.DBX_APNS_CERT_FILE):
dbx_connection = get_connection(settings.DBX_APNS_CERT_FILE,
settings.DBX_APNS_KEY_FILE)
def num_push_devices_for_user(user_profile, kind = None):
# type: (UserProfile, Optional[int]) -> PushDeviceToken
if kind is None:
return PushDeviceToken.objects.filter(user=user_profile).count()
else:
return PushDeviceToken.objects.filter(user=user_profile, kind=kind).count()
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data):
# type: (bytes) -> text_type
return binascii.hexlify(base64.b64decode(data)).decode('utf-8')
def hex_to_b64(data):
# type: (text_type) -> bytes
return base64.b64encode(binascii.unhexlify(data.encode('utf-8')))
def _do_push_to_apns_service(user, message, apns_connection):
# type: (UserProfile, APNsMessage, APNs) -> None
if not apns_connection:
logging.info("Not delivering APNS message %s to user %s due to missing connection" % (message, user))
return
frame = message.get_frame()
apns_connection.gateway_server.send_notification_multiple(frame)
# Send a push notification to the desired clients
# extra_data is a dict that will be passed to the
# mobile app
@statsd_increment("apple_push_notification")
def send_apple_push_notification(user, alert, **extra_data):
# type: (UserProfile, text_type, **Any) -> None
if not connection and not dbx_connection:
logging.error("Attempting to send push notification, but no connection was found. "
"This may be because we could not find the APNS Certificate file.")
return
devices = PushDeviceToken.objects.filter(user=user, kind=PushDeviceToken.APNS)
# Plain b64 token kept for debugging purposes
tokens = [(b64_to_hex(device.token), device.ios_app_id, device.token)
for device in devices]
for conn, app_ids in [
(connection, [settings.ZULIP_IOS_APP_ID, None]),
(dbx_connection, [settings.DBX_IOS_APP_ID])]:
valid_devices = [device for device in tokens if device[1] in app_ids]
valid_tokens = [device[0] for device in valid_devices]
if valid_tokens:
logging.info("APNS: Sending apple push notification "
"to devices: %s" % (valid_devices,))
zulip_message = APNsMessage(user, valid_tokens, alert=alert, **extra_data)
_do_push_to_apns_service(user, zulip_message, conn)
else:
logging.warn("APNS: Not sending notification because "
"tokens didn't match devices: %s" % (app_ids,))
# NOTE: This is used by the check_apns_tokens manage.py command. Do not call it otherwise, as the
# feedback() call can take up to 15s
def check_apns_feedback():
# type: () -> None
feedback_connection = APNs(use_sandbox=settings.APNS_SANDBOX,
cert_file=settings.APNS_CERT_FILE,
key_file=settings.APNS_KEY_FILE)
for token, since in feedback_connection.feedback_server.items():
since_date = timestamp_to_datetime(since)
logging.info("Found unavailable token %s, unavailable since %s" % (token, since_date))
PushDeviceToken.objects.filter(token=hex_to_b64(token), last_updated__lt=since_date,
kind=PushDeviceToken.APNS).delete()
logging.info("Finished checking feedback for stale tokens")
if settings.ANDROID_GCM_API_KEY:
gcm = gcmclient.GCM(settings.ANDROID_GCM_API_KEY)
else:
gcm = None
@statsd_increment("android_push_notification")
def send_android_push_notification(user, data):
# type: (UserProfile, Dict[str, Any
|
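PushDeviceToken above stores the APNS token base64-encoded, while apns-client works with hex strings; b64_to_hex and hex_to_b64 translate between the two. A standalone round-trip check of that convention, using a made-up token value:
import base64
import binascii
hex_token = "a1b2c3d4e5f6"  # hypothetical device token in hex
b64_token = base64.b64encode(binascii.unhexlify(hex_token.encode('utf-8')))
assert binascii.hexlify(base64.b64decode(b64_token)).decode('utf-8') == hex_token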
nens/qgispluginreleaser
|
setup.py
|
Python
|
gpl-3.0
| 1,483
| 0.000674
|
from setuptools import setup
version = '1.2.dev0'
long_description = '\n\n'.join([
open('README.rst').read(),
open('CREDITS.rst').read(),
open('CHANGES.rst').read(),
])
install_requires = [
'setuptools',
'zest.releaser',
    ]
tests_require = [
'coverage',
'mock',
'nose',
]
setup(name='qgispluginreleaser',
version=version,
description="Add-on for zest.releaser for releasing qgis plugins",
long_description=long_description,
# Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=[],
keywords=[],
author='Reinout van Rees',
author_email='reinout@vanrees.org',
url='https://github.com/nens/qgispluginreleaser',
license='GPL',
packages=['qgispluginreleaser'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require={'test': tests_require},
entry_points={
          'console_scripts': [
],
'zest.releaser.releaser.after_checkout': [
'release_plugin = qgispluginreleaser.entry_point:create_zipfile',
],
'zest.releaser.prereleaser.middle': [
'prerelease_plugin = qgispluginreleaser.entry_point:fix_version',
],
'zest.releaser.postreleaser.middle': [
              'postrelease_plugin = qgispluginreleaser.entry_point:fix_version',
],
})
|
lowitty/eeep
|
quotation/migrations/0006_orderingcompany.py
|
Python
|
mit
| 542
| 0.001845
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('quotation', '0005_auto_20150828_2207'),
]
operations = [
migrations.CreateModel(
name='Orderingcompany',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1024)),
],
),
]
|
pmlrsg/arsf_tools
|
las13/las13.py
|
Python
|
gpl-3.0
| 9,486
| 0.013388
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
###########################################################
# This file has been created by ARSF Data Analysis Node and
# is licensed under the GPL v3 Licence. A copy of this
# licence is available to download with this file.
###########################################################
###########################################################################
#This is the main interface python library for the Las1.3Reader c++ library
# Use it for reading / plotting LAS 1.3 data
###########################################################################
import sys
import pylab
import las13reader
from matplotlib.backends.backend_pdf import PdfPages
#wrapper class for the c++ wrapper
class las13():
"""
Class to wrap the las13reader class to be more user friendly
"""
def __init__(self,filename,quiet=True):
"""
Constructor: takes a LAS 1.3 file as input
"""
if isinstance(filename,str):
self.reader=las13reader.Las1_3_handler(filename)
self.reader.SetQuiet(quiet)
else:
raise Exception("Expected string argument for filename.")
def points_in_bounds(self,bounds):
"""
Function that searches the LAS file and returns all points within the given rectangular bounds.
Inputs:
bounds - a list of 4 floating point values describing north, south, west and east bounds.
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(bounds,list):
raise Exception("Expected list argument for bounds (of length 4: north,south,west,east).")
if len(bounds)!=4:
raise Exception("Expected bounds list of length 4: north,south,west,east.")
pmanager=self.reader.GetPointsInBounds(bounds[0],bounds[1],bounds[2],bounds[3])
return pmanager
def points_with_classification(self,classification):
"""
Function that searches the LAS file and returns all points with the given classification value.
Inputs:
classification - an integer value of the classification to search for
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(classification,int):
raise Exception("Expected int argument for classification.")
pmanager=self.reader.GetPointsWithClassification(classification)
return pmanager
def read_like_book(self,npoints=1,reset=False):
"""
Function that searches the LAS file and returns points in sequence up to npoints.
Inputs:
npoints - an integer value for the maximum number of points to read
reset - a boolean that when True resets the reader back to the start of the file
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(npoints,int):
raise Exception("Expected int argument for npoints.")
if not isinstance(reset,bool):
raise Exception("Expected bool argument for reset.")
pmanager=self.reader.ReadLikeBook(npoints,reset)
return pmanager
def tidy(self):
"""
Function to destroy and free up memory used in any current pulse managers
"""
self.reader.DeletePulseManagers()
###############################################################################
# Static methods below here - functions do not depend on an instance of las13
###############################################################################
#function to return the waveform x,y,z and intensity values from a given pulse
@staticmethod
def waveform(pulse):
"""
Function to return the waveform of intensity values from a given pulse object
Input:
pulse - a las13reader.Pulse object (such that pulsemanagers contain)
Returns:
The waveform as a dictionary with keys 'x','y','z', and 'intensity'.
"""
if not isinstance(pulse,las13reader.Pulse):
print("las13.waveform expects a Pulse object to be passed, not: %s"%type(pulse))
return None
#number of samples
nsamples=pulse.nsamples()
#return a dict of lists
waveform={'x':[],'y':[],'z':[],'intensity':[]}
for s in range(0,nsamples):
samplePos=list(pulse.sampleXYZ(s))
waveform['x'].append(samplePos[0])
waveform['y'].append(samplePos[1])
waveform['z'].append(samplePos[2])
waveform['intensity'].append(pulse.sampleintensity(s))
return waveform
@staticmethod
def discrete(pulse):
"""
Function to return the discrete point information from the given pulse
"""
discrete=[]
for r in range(0,pulse.nreturns()):
discrete.append(discretepoint(pulse,r))
return discrete
#Function to return some (requested) info about the given pulse
@staticmethod
def get_pulse_info(pulse,keyword):
"""
Function to extract the requested information from the given pulse object. This
is really just a helper function to convert vectors into lists.
Inputs:
pulse - the pulse object
keyword - key to describe information requested
Returns:
the requested data
"""
        keywords=['time','nreturns','nsamples','origin','offset','scanangle','classification','returnlocs','disint']
if keyword == 'time':
return pulse.time()
elif keyword == 'nreturns':
return pulse.nreturns()
elif keyword == 'nsamples':
            return pulse.nsamples()
elif keyword == 'origin':
return list(pulse.originXYZ())
elif keyword == 'offset':
return list(pulse.offsetXYZ())
elif keyword == 'scanangle':
return pulse.scanangle()
elif keyword == 'classification':
return list(pulse.classification())
elif keyword == 'returnlocs':
return list(pulse.returnpointlocation())
elif keyword == 'disint':
return list(pulse.discreteintensities())
else:
print("Keyword should be one of: ",keywords)
raise Exception("Unrecognised keyword in get_pulse_info: %s."%(keyword))
#Function to plot the pulse
@staticmethod
def quick_plot_pulse(pulse,title=None,filename=None):
"""
Function to produce a plot of the pulse waveform data
Inputs:
pulse - the pulse object
title - a title to give the plot
filename - if given the plot is saved to the filename, else displayed on screen
"""
waveform=las13.waveform(pulse)
pylab.plot(waveform['intensity'],'b-',label='Waveform')
pylab.xlabel('Sample number')
pylab.ylabel('Intensity')
if title:
pylab.title(title)
pylab.ylim([0,pylab.ylim()[1]+5])
pylab.legend()
if filename:
pylab.savefig(filename)
else:
pylab.show()
@staticmethod
def plot_all_pulses(pulsemanager,filename):
"""
Function to plot every pulse within a pulsemanager and save to a PDF file
Inputs:
pulsemanager - the pulsemanager object to plot data from
filename - the PDF filename to save the plots to
"""
fileobj=PdfPages(filename)
for p in range(pulsemanager.getNumOfPulses()):
pulse=pulsemanager[p]
waveform=las13.waveform(pulse)
pylab.plot(waveform['intensity'],'b-',label='Waveform')
pylab.plot( [x / pulse.sampletime() for x in las13.get_pulse_info(pulse,'returnlocs')],las13.get_pulse_info(pulse,'disint'),'ro',label='Discrete')
pylab.xlabel('Sample number')
pylab.ylabel('Intensity')
pylab.title('Pulse with time: %f'%pulse.time())
pylab.ylim([0,pylab.ylim()[1]+5])
pylab.legend()
fileobj.savefig()
pylab.clf()
fileobj.clos
|
pczerkas/tempest
|
tempest/api/compute/keypairs/base.py
|
Python
|
apache-2.0
| 1,347
| 0
|
# Copyright 2015 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
class BaseKeypairTest(base.BaseComputeTest):
"""Base test case class for all keypair API tests."""
_api_version = 2
@classmethod
def setup_clients(cls):
super(BaseKeypairTest, cls).setup_clients()
cls.client = cls.keypairs_client
def _delete_keypair(self, keypair_name):
self.client.delete_keypair(keypair_name)
def _create_keypair(self, keypair_name, pub_key=None):
kwargs = {'name': keypair_name}
if pub_key:
kwargs.update({'public_key': pub_key})
body = self.client.create_keypair(**kwargs)['keypair']
self.addCleanup(self._delete_keypair, keypair_name)
return body
|
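The _create_keypair helper above pairs resource creation with self.addCleanup, so every keypair created in a test is deleted when that test finishes, whatever its outcome. A minimal standalone illustration of the same pattern with plain unittest and an invented in-memory "resource" list:
import unittest
created = []
class CleanupExample(unittest.TestCase):
    def _create_resource(self, name):
        created.append(name)
        self.addCleanup(created.remove, name)  # runs after the test, even if it fails
        return name
    def test_resource_is_tracked(self):
        self._create_resource('keypair-1')
        self.assertIn('keypair-1', created)
if __name__ == '__main__':
    unittest.main()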
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/aaa/aaagroup_vpntrafficpolicy_binding.py
|
Python
|
apache-2.0
| 7,121
| 0.037073
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaagroup_vpntrafficpolicy_binding(base_resource) :
""" Binding class showing the vpntrafficpolicy that can be bound to aaagroup.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._groupname = ""
self.___count = 0
@property
def priority(self) :
"""Priority to assign to the policy, as an integer. A lower number indicates a higher priority.
Required when binding a group to a policy. Not relevant to any other
type of group binding.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority to assign to the policy, as an integer. A lower number indicates a higher priority.
Required when binding a group to a policy. Not relevant to any other
type of group binding.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def policy(self) :
"""The policy name.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The policy name.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupname(self) :
"""Name of the group that you are binding.<br/>Minimum length = 1.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
"""Name of the group that you are binding.<br/>Minimum length = 1
"""
try :
self._groupname = groupname
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaagroup_vpntrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaagroup_vpntrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.groupname) :
return str(self.groupname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = aaagroup_vpntrafficpolicy_binding()
updateresource.groupname = resource.groupname
updateresource.policy = resource.policy
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [aaagroup_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].groupname = resource[i].groupname
updateresources[i].policy = resource[i].policy
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = aaagroup_vpntrafficpolicy_binding()
deleteresource.groupname = resource.groupname
deleteresource.policy = resource.policy
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [aaagroup_vpntrafficpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].groupname = resource[i].groupname
deleteresources[i].policy = resource[i].policy
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, groupname) :
""" Use this API to fetch aaagroup_vpntrafficpolicy_binding resources.
"""
try :
obj = aaagroup_vpntrafficpolicy_binding()
obj.groupname = groupname
response = obj.get_resources(service)
return response
		except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, groupname, filter_) :
""" Use this API to fetch filtered set of aaagroup_vpntrafficpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaagroup_vpntrafficpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
	def count(cls, service, groupname) :
		""" Use this API to count aaagroup_vpntrafficpolicy_binding resources configured on NetScaler.
"""
try :
obj = aaagroup_vpntrafficpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, groupname, filter_) :
""" Use this API to count the filtered set of aaagroup_vpntrafficpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaagroup_vpntrafficpolicy_binding()
obj.groupname = groupname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class aaagroup_vpntrafficpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.aaagroup_vpntrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaagroup_vpntrafficpolicy_binding = [aaagroup_vpntrafficpolicy_binding() for _ in range(length)]
|
steveandroulakis/mytardis
|
tardis/tardis_portal/templatetags/experimentstats.py
|
Python
|
bsd-3-clause
| 497
| 0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django import template
from tardis.tardis_portal.models import Dataset_File
from django.db.models import Sum
register = template.Library()
@register.filter
def experiment_file_count(value):
return Dataset_File.objects.filter(dataset__experiments__pk=value).count()
# @register.filter
# def experiment_file_size(value):....
# return Dataset_File.objects.filter(dataset__experiment__pk=value).
# aggregate(Sum('size'))['size__sum']
|
cjellick/rancher
|
tests/validation/tests/v3_api/test_vmwarevsphere_driver.py
|
Python
|
apache-2.0
| 7,582
| 0.000396
|
import pytest, copy
from .common import * # NOQA
RANCHER_VSPHERE_USERNAME = os.environ.get("RANCHER_VSPHERE_USERNAME", "")
RANCHER_VSPHERE_PASSWORD = os.environ.get("RANCHER_VSPHERE_PASSWORD", "")
RANCHER_VSPHERE_VCENTER = os.environ.get("RANCHER_VSPHERE_VCENTER", "")
RANCHER_VSPHERE_VCENTER_PORT = \
os.environ.get("RANCHER_VSPHERE_VCENTER_PORT", 443)
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME",
random_name() + "-cluster")
ENGINE_INSTALL_URL = os.environ.get("RANCHER_ENGINE_INSTALL_URL",
"https://get.docker.com/")
CLONE_FROM = \
os.environ.get("RANCHER_CLONE_FROM",
"/RNCH-HE-FMT/vm/ubuntu-bionic-18.04-cloudimg")
RESOURCE_POOL = \
os.environ.get("RANCHER_RESOURCE_POOL",
"/RNCH-HE-FMT/host/FMT2.R620.1/Resources/validation-tests")
DATASTORE = \
os.environ.get("RANCHER_DATASTORE",
"/RNCH-HE-FMT/datastore/ranch01-silo01-vm01")
DATASTORE_CLUSTER = \
os.environ.get("RANCHER_DATASTORE_CLUSTER",
"/RNCH-HE-FMT/datastore/ds_cluster")
CLOUD_CONFIG = \
os.environ.get("RANCHER_CLOUD_CONFIG",
"#cloud-config\r\npackages:\r\n - redis-server")
rke_config = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network":
{"plugin": "canal",
"type": "networkConfig",
"options": {"flannelBackendType": "vxlan"}},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService"},
"kubeApi": {
"alwaysPullImages": False,
"podSecurityPolicy": False,
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"}},
"sshAgentAuth": Fa
|
lse}
vsphereConfig = {
    "cfgparam": ["disk.enableUUID=TRUE"],
    "cloneFrom": CLONE_FROM,
    "cloudinit": "",
"contentLibrary": "",
"cpuCount": "4",
"creationType": "vm",
"customAttribute": ["203=CustomA", "204=CustomB"],
"datacenter": "/RNCH-HE-FMT",
"datastore": "",
"datastoreCluster": "",
"diskSize": "20000",
"folder": "/",
"hostsystem": "",
"memorySize": "16000",
"network": ["/RNCH-HE-FMT/network/Private Range 172.16.128.1-21"],
"password": "",
"pool": RESOURCE_POOL,
"sshPassword": "tcuser",
"sshPort": "22",
"sshUser": "docker",
"sshUserGroup": "staff",
"tag": [
"urn:vmomi:InventoryServiceTag:04ffafd0-d7de-440c-a32c-5cd98761f812:GLOBAL",
"urn:vmomi:InventoryServiceTag:d00f1cf2-6822-46a0-9602-679ea56efd57:GLOBAL"
],
"type": "vmwarevsphereConfig",
"username": "",
"vappIpallocationpolicy": "",
"vappIpprotocol": "",
"vappProperty": "",
"vappTransport": "",
"vcenter": "",
"vcenterPort": "443",
}
if_vsphere_var_present = pytest.mark.skipif(
RANCHER_VSPHERE_USERNAME == '' or
RANCHER_VSPHERE_PASSWORD == '' or
RANCHER_VSPHERE_VCENTER == '',
reason='required env variables are not present')
@if_vsphere_var_present
@pytest.mark.usefixtures("create_cluster")
def test_vsphere_provisioning():
client = get_client_for_token(USER_TOKEN)
cluster = get_cluster_by_name(client=client, name=CLUSTER_NAME)
nodes = client.list_node(clusterId=cluster.id).data
assert 4 == len(nodes)
validate_cluster(client, cluster, skipIngresscheck=False)
@pytest.fixture(scope='module', autouse="True")
def create_cluster(request):
client = get_client_for_token(USER_TOKEN)
cloud_cred = create_vsphere_credential(client)
nt = create_vsphere_nodetemplate(
client, cloud_cred, datastore=DATASTORE)
ntcc = create_vsphere_nodetemplate(
client, cloud_cred, datastore=DATASTORE, cloud_config=CLOUD_CONFIG)
ntdsc = create_vsphere_nodetemplate(
client, cloud_cred, datastore_cluster=DATASTORE_CLUSTER)
cluster = client.create_cluster(
name=CLUSTER_NAME,
rancherKubernetesEngineConfig=rke_config)
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
request.addfinalizer(cluster_cleanup)
master_pool = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": True,
"etcd": True,
"hostnamePrefix": CLUSTER_NAME + "-master",
"nodeTemplateId": nt.id,
"quantity": 1,
"worker": False,
})
worker_pool1 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker",
"nodeTemplateId": nt.id,
"quantity": 1,
"worker": True,
})
worker_pool2 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker-cc",
"nodeTemplateId": ntcc.id,
"quantity": 1,
"worker": True,
})
worker_pool3 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker-dsc",
"nodeTemplateId": ntdsc.id,
"quantity": 1,
"worker": True,
})
client.wait_success(master_pool)
client.wait_success(worker_pool1)
client.wait_success(worker_pool2)
client.wait_success(worker_pool3)
wait_for_cluster_node_count(client, cluster, 4, timeout=900)
def create_vsphere_credential(client):
return client.create_cloud_credential(
name=random_name(),
vmwarevspherecredentialConfig={
"username": RANCHER_VSPHERE_USERNAME,
"password": RANCHER_VSPHERE_PASSWORD,
"vcenter": RANCHER_VSPHERE_VCENTER,
"vcenterPort": RANCHER_VSPHERE_VCENTER_PORT,
}
)
def cluster_cleanup():
if not RANCHER_CLEANUP_CLUSTER:
return
client = get_client_for_token(USER_TOKEN)
cluster = get_cluster_by_name(client=client, name=CLUSTER_NAME)
nodes = get_schedulable_nodes(cluster)
delete_cluster(client, cluster)
for node in nodes:
wait_for_node_to_be_deleted(client, node)
def create_vsphere_nodetemplate(
client, cloud_cred, cloud_config="", datastore="",
datastore_cluster=""):
vc = copy.copy(vsphereConfig)
if cloud_config != "":
vc["cloudConfig"] = cloud_config
if datastore != "":
vc["datastore"] = datastore
if datastore_cluster != "":
vc["datastoreCluster"] = datastore_cluster
return client.create_node_template({
"vmwarevsphereConfig": vc,
"name": random_name(),
"namespaceId": "fixme",
"useInternalIpAddress": True,
"driver": "vmwarevsphere",
"engineInstallURL": ENGINE_INSTALL_URL,
"cloudCredentialId": cloud_cred.id,
})
|
ArvinDevel/incubator-pulsar
|
pulsar-functions/instance/src/test/python/test_python_instance.py
|
Python
|
apache-2.0
| 2,008
| 0.00498
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# DEPENDENCIES: unittest2,mock
from contextimpl import ContextImpl
from python_instance import InstanceConfig
from mock import Mock
import Function_pb2
import log
import os
import unittest
class TestContextImpl(unittest.TestCase):
  def setUp(self):
log.init_logger("INFO", "foo", os.environ.get("PULSAR_HOME") + "/conf/functions-logging/console_logging_config.ini")
def test_context_publish(self):
instance_id = 'test_instance_id'
function_id = 'test_function_id'
function_version = 'test_function_version'
function_details = Function_pb2.FunctionDetails()
    max_buffered_tuples = 100
instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
logger = log.Log
pulsar_client = Mock()
producer = Mock()
producer.send_async = Mock(return_value=None)
pulsar_client.create_producer = Mock(return_value=producer)
user_code=__file__
consumers = None
context_impl = ContextImpl(instance_config, logger, pulsar_client, user_code, consumers, None, None)
context_impl.publish("test_topic_name", "test_message")
producer.send_async.assert_called_with("test_message", None, properties=None)
|
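The final assertion in test_context_publish relies on mock.Mock recording the arguments of every call. The same pattern, stripped of the Pulsar classes and assuming only the mock package already listed in the file's dependencies:
from mock import Mock
producer = Mock()
producer.send_async = Mock(return_value=None)
producer.send_async("test_message", None, properties=None)
producer.send_async.assert_called_with("test_message", None, properties=None)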
Schizo/MediaBrowser
|
python/Temp/sandboxSignalMapper.py
|
Python
|
mit
| 1,120
| 0.002679
|
from PyQt4 import QtGui, QtCore
class Window(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.mapper = QtCore.QSignalMapper(self)
self.toolbar = self.addToolBar('Foo')
self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
for text in 'One Two Three'.split():
action = QtGui.QAction(text, self)
self.mapper.setMapping(action, text)
action.triggered.connect(self.mapper.map)
self.toolbar.addAction(action)
self.mapper.mapped['QString'].connect(self.handleButton)
self.edit = QtGui.QLineEdit(self)
self.setCentralWidget(self.edit)
def handleButton(self, identifier):
if identifier == 'One':
text = 'Do This'
elif identifier == 'Two':
text = 'Do That'
elif identifier == 'Three':
            text = 'Do Other'
        self.edit.setText(text)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.resize(300, 60)
window.show()
sys.exit(app.exec_())
|
tkf/railgun
|
tests/test_simple_copy.py
|
Python
|
mit
| 1,929
| 0
|
import copy
import unittest
from numpy.testing import assert_equal
from test_simobj import VectCalc
from arrayaccess import DefaultArrayAccess
class TestCopy(unittest.TestCase):
simclass = VectCalc
copyfunc = staticmethod(copy.copy)
clone_v1 = [20] * 10
def check_copy(self, name, value):
orig = self.simclass()
setattr(orig, name, 10)
clone = self.copyfunc(orig)
setattr(orig, name, 20)
assert_equal(getattr(clone, name), value)
def test_copy(self):
self.check_copy('v1', self.clone_v1)
def test_identity(self):
orig = self.simclass()
clone = self.copyfunc(orig)
assert clone is not orig
def test_attrs_identity(self):
orig = self.simclass()
orig.some_random_attr = object()
clone = self.copyfunc(orig)
# NOTE: name = 'num_i' fails here:
for name in self.check_attrs_identity_names:
self.check_attrs_identity(name, clone, orig)
    check_attrs_identity_names = ['v1', 'v2', 'v3', 'some_random_attr']
def check_attrs_identity(self, name, clone, orig):
msg = 'clone.{0} is orig.{0}'.format(name)
assert getattr(clone, name) is getattr(orig, name), msg
class TestDeepCopy(TestCopy):
copyfunc = staticmethod(copy.deepcopy)
clone_v1 = [10] * 10
def check_attrs_identity(self, name, clone, orig):
msg = 'clone.{0} is not orig.{0}'.format(name)
assert getattr(clone, name) is not getattr(orig, name), msg
class MixinTestCopy3D(object):
simclass = DefaultArrayAccess
check_attrs_identity_names = [
'char1d', 'int2d', 'double3d', 'some_random_attr']
clone_int2d = [[20] * 2] * 2
def test_copy(self):
self.check_copy('int2d', self.clone_int2d)
class TestCopy3D(MixinTestCopy3D, TestCopy):
pass
class TestDeepCopy3D(MixinTestCopy3D, TestDeepCopy):
clone_int2d = [[10] * 2] * 2
|
gislab-npo/gislab-web
|
server/webgis/mapcache/management/commands/mapcache_clean.py
|
Python
|
gpl-2.0
| 3,659
| 0.004646
|
import os
import re
import hashlib
import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from webgis.mapcache import Disk
from webgis.app.models import Project_registry
from webgis.map.metadata_parser import MetadataParser
from webgis.map.project import get_last_project_version
class Command(BaseCommand):
"""Django-admin command for cleaning obsolete map caches of all published projects."""
help = 'Clean obsolete map caches of all published projects'
def handle(self, *args, **options):
cache = Disk(base=os.path.join(settings.MEDIA_ROOT, 'cache'))
for project_record in list(Project_registry.objects.all()):
project = project_record.project
project_hash = hashlib.md5(project.encode('utf-8')).hexdigest()
project_cache_dir = os.path.join(cache.basedir, project_hash)
last_publish = 0
last_project_version = get_last_project_version(project)
if last_project_version and os.path.exists(os.path.join(settings.GISQUICK_PROJECT_ROOT, last_project_version+'.qgs')):
                version_pattern = re.compile(re.escape(project) + r'_(\d{10})')
match = version_pattern.match(last_project_version)
if match:
# timestamp from filename
last_publish = int(match.group(1))
else:
# timestamp from metadata
metadata_filename = os.path.join(settings.GISQUICK_PROJECT_ROOT, last_project_version+'.meta')
if os.path.exists(metadata_filename):
try:
metadata = MetadataParser(metadata_filename)
last_publish = int(metadata.publish_date_unix)
except:
self.stderr.write("Failed to load '{0}' project's metadata file: '{1}'".format(project, metadata_filename))
if os.path.exists(project_cache_dir):
                project_publications = os.listdir(project_cache_dir)
if project_publications:
for publish_tag in project_publications:
if int(publish_tag) != int(last_publish):
publish_pretty = datetime.datetime.fromtimestamp(int(publish_tag)).strftime("%d.%m.%Y %H:%M:%S")
self.stdout.write("Cleaning '{0}' project's publication '{1}' ({2})".format(project, publish_tag, publish_pretty))
try:
cache.delete_project_cache(project_hash, publish_tag)
except:
cache_dir = os.path.join(project_cache_dir, publish_tag)
self.stderr.write("Failed to delete '{0}' project's publication '{1}' ({2}): {3}".format(project, publish_tag, publish_pretty, cache_dir))
else:
# remove empty project's cache folder
try:
os.rmdir(project_cache_dir)
except: pass
else:
# project was deleted, clean all existing publications
self.stdout.write("Cleaning cache of deleted project '{0}'".format(project))
try:
cache.delete_project_cache(project_hash)
project_record.delete()
except:
self.stderr.write("Failed to delete '{0}' project's cache: {1}".format(project, project_cache_dir))
|
zsiciarz/variablestars.net
|
stars/models.py
|
Python
|
mit
| 6,165
| 0.000162
|
from django.db import models
from django.db.models import Count
from django.db.models.query import QuerySet
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from observations.models import Observation
from observations.utils import jd_now
CONSTELLATIONS = Choices(
("AND", _("Andromeda")),
("ANT", _("Antlia")),
("APS", _("Apus")),
("AQR", _("Aquarius")),
("AQL", _("Aquila")),
("ARA", _("Ara")),
("ARI", _("Aries")),
("AUR", _("Auriga")),
("BOO", _("Boötes")),
("CAE", _("Caelum")),
("CAM", _("Camelopardalis")),
("CNC", _("Cancer")),
("CVN", _("Canes Venatici")),
("CMA", _("Canis Major")),
("CMI", _("Canis Minor")),
("CAP", _("Capricornus")),
("CAR", _("Carina")),
("CAS", _("Cassiopeia")),
("CEN", _("Centaurus")),
("CEP", _("Cepheus")),
("CET", _("Cetus")),
("CHA", _("Chamaeleon")),
("CIR", _("Circinus")),
("COL", _("Columba")),
("COM", _("Coma Berenices")),
("CRA", _("Corona Australis")),
("CRB", _("Corona Borealis")),
("CRV", _("Corvus")),
("CRT", _("Crater")),
("CRU", _("Crux")),
("CYG", _("Cygnus")),
("DEL", _("Delphinus")),
("DOR", _("Dorado")),
("DRA", _("Draco")),
("EQU", _("Equuleus")),
("ERI", _("Eridanus")),
("FOR", _("Fornax")),
("GEM", _("Gemini")),
("GRU", _("Grus")),
("HER", _("Hercules")),
("HOR", _("Horologium")),
("HYA", _("Hydra")),
("HYI", _("Hydrus")),
("IND", _("Indus")),
("LAC", _("Lacerta")),
("LEO", _("Leo")),
("LMI", _("Leo Minor")),
("LEP", _("Lepus")),
("LIB", _("Libra")),
("LUP", _("Lupus")),
("LYN", _("Lynx")),
("LYR", _("Lyra")),
("MEN", _("Mensa")),
("MIC", _("Microscopium")),
("MON", _("Monoceros")),
("MUS", _("Musca")),
("NOR", _("Norma")),
("OCT", _("Octans")),
("OPH", _("Ophiuchus")),
("ORI", _("Orion")),
("PAV", _("Pavo")),
("PEG", _("Pegasus")),
("PER", _("Perseus")),
("PHE", _("Phoenix")),
("PIC", _("Pictor")),
("PSC", _("Pisces")),
("PSA", _("Piscis Austrinus")),
("PUP", _("Puppis")),
("PYX", _("Pyxis")),
("RET", _("Reticulum")),
("SGE", _("Sagitta")),
("SGR", _("Sagittarius")),
("SCO", _("Scorpius")),
("SCL", _("Sculptor")),
("SCT", _("Scutum")),
("SER", _("Serpens")),
("SEX", _("Sextans")),
("TAU", _("Taurus")),
("TEL", _("Telescopium")),
("TRI", _("Triangulum")),
("TRA", _("Triangulum Australe")),
("TUC", _("Tucana")),
("UMA", _("Ursa Major")),
("UMI", _("Ursa Minor")),
("VEL", _("Vela")),
("VIR", _("Virgo")),
("VOL", _("Volans")),
("VUL", _("Vulpecula")),
)
CONSTELLATIONS_DICT = dict(CONSTELLATIONS)
class StarQuerySet(QuerySet):
def get_total_stats(self, observer=None):
last_month = jd_now() - 30
star_ids = Observation.objects.filter(jd__gt=last_month).values("star")
if observer:
observed_ids = observer.observations.values("star")
else:
observed_ids = []
return {
"total_star_count": self.count(),
"observed_last_month_count": self.filter(pk__in=star_ids).count(),
"observed_by_you_count": self.filter(pk__in=observed_ids).count(),
}
class Star(models.Model):
"""
A variable star.
"""
constellation = models.CharField(
_("Constellation"), max_length=3, choices=CONSTELLATIONS, db_index=True
)
name = models.CharField(_("Name"), max_length=20, db_index=True)
ra = models.CharField(_("Right Ascension"), max_length=15, default="")
dec = models.CharField(_("Declination"), max_length=15, default="")
variability_type = models.ForeignKey(
"stars.VariabilityType",
null=True,
blank=True,
verbose_name=_("Type of variability"),
on_delete=models.CASCADE,
)
    max_magnitude = models.FloatField(_("Maximum brightness"), null=True)
min_magnitude = models.FloatField(_("Minimum brightness"), null=True)
    epoch = models.FloatField(_("Epoch"), null=True)
period = models.FloatField(_("Period"), null=True)
# denormalization
observations_count = models.PositiveIntegerField(default=0, editable=False)
objects = StarQuerySet.as_manager()
class Meta:
verbose_name = _("Variable star")
verbose_name_plural = _("Variable stars")
ordering = ("constellation", "name")
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("stars:star_detail", kwargs={"pk": self.pk})
def is_periodic(self):
"""
Returns True if the star is periodic (has a defined period and epoch).
"""
return self.epoch is not None and self.period is not None
def get_gcvs_search_name(self):
"""
Fixes GCVS inconsequence in handling escape characters.
"""
return self.name.replace(" ", "+")
def top_observers(self):
return Observation.objects.top_observers().filter(star=self)
def observers_count(self):
return self.observations.aggregate(c=Count("observer", distinct=True))["c"]
def recent_observations(self):
return self.observations.select_related("observer").order_by("-jd")
def get_observations_by_observer(self, observer):
return self.observations.filter(observer=observer)
class VariabilityType(models.Model):
"""
A short description of variability type from GCVS.
"""
code = models.CharField(_("Letter code"), max_length=12, db_index=True)
short_description = models.CharField(
_("Short description"), max_length=100, blank=True, default=""
)
long_description = models.TextField(_("Long description"), default="")
class Meta:
verbose_name = _("Variability type")
verbose_name_plural = _("Variability types")
ordering = ("code",)
def __str__(self):
return self.code
def get_absolute_url(self):
return reverse("stars:variabilitytype_detail", kwargs={"pk": self.pk})
|
TimothyBest/Django_Boilerplate
|
mysite/mysite/settings.py
|
Python
|
mit
| 6,250
| 0.00192
|
"""
Django settings for composersCouch project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = os.environ.get('SECRET_KEY', 'k$s+jts3d$349yo&ojfqo1wvs!f##2w!p&h$4&qd$uz_5&a7%q')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
DEVELOPMENT = os.environ.get('DEVELOPMENT', False)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEVELOPMENT
ALLOWED_HOSTS = []
# Application definition
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'autocomplete_light',
'easy_timezones',
'jinja2',
'pipeline',
'robots',
#'test_without_migrations',
'pipeline_jinja2',
'utils',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'easy_timezones.middleware.EasyTimezoneMiddleware',
'pipeline_jinja2.middleware.MinifyHTMLMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
if DEVELOPMENT:
POSTGIS_VERSION = (2, 1, 4)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
DEFAULT_FROM_EMAIL = 'testing@example.com'
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
MEDIA_URL = '/media/'
    PIPELINE_COMPILERS = ('pipeline.compilers.sass.SASSCompiler',)
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS = {
'sass': {
'source_filenames': (
'stylesheets/theme.scss',
),
'output_filename': 'stylesheets/style.min.css',
'extra_context': {
'media': 'screen',
},
},
}
else:
# TODO: add production settings
pass
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.environ.get('DB_NAME', 'mydb'),
'OPTIONS': {
'options': '-c search_path=gis,public,pg_catalog'
},
'USER': os.environ.get('DS_USERNAME', 'postgres'),
'PASSWORD': os.environ.get('DS_PASSWORD', 'devDatabase'),
'HOST': os.environ.get('DS_HOSTNAME', 'localhost'),
'PORT': os.environ.get('DS_PORT', ''),
'ATOMIC_REQUESTS': True,
}
}
SITE_ID = 1
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
TIME_ZONE = 'UCT'
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'mysite/media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join( BASE_DIR, 'mysite/staticfiles/' )
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( BASE_DIR, 'mysite/static' ),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),],
'APP_DIRS': True,
'OPTIONS': {
'environment' : 'mysite.jinja2.environment',
}
},
]
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.csrf',
'django.core.context_processors.request',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
    'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
# override user defaults
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: "/redirect/%s/" % u.username,
}
PIPELINE_ENABLED = True
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_JS = {
'scripts': {
'source_filenames': (
'scripts/jquery-1.11.0.min.js',
'scripts/fastclick.js',
            'scripts/foundation.min.js',
),
'output_filename': 'scripts/scripts.min.js',
'extra_context': {
'async': True,
},
}
}
ROBOTS_SITEMAP_URLS = [
'http://www.composerscouch.com/sitemap.xml',
]
GEOIP_DATABASE = os.path.join(STATIC_ROOT, 'GeoLiteCity.dat')
try:
# import app settings here.
# example: from app.settings import *
pass
except ImportError:
pass
|
appsembler/edx-platform
|
openedx/core/djangoapps/appsembler/multi_tenant_emails/tests/test_account_change_email.py
|
Python
|
agpl-3.0
| 5,779
| 0.004326
|
"""
Test cases to cover Accounts change email related to APPSEMBLER_MULTI_TENANT_EMAILS.
"""
import json
from unittest.mock import patch, Mock
from rest_framework import status
from rest_framework.test import APITestCase
from django.urls import reverse
from student.models import PendingEmailChange
from .test_utils import lms_multi_tenant_test, with_organization_context, create_org_user
@lms_multi_tenant_test
@patch(
'openedx.core.djangoapps.user_authn.views.password_reset.get_current_site',
Mock(return_value=Mock(domain='example.com'))
)
@patch(
'openedx.core.djangoapps.ace_common.templatetags.ace._get_google_analytics_tracking_url',
Mock(return_value='http://url.com/')
)
class TestAccountsAPI(APITestCase):
"""
Unit tests for the Accounts views.
    This is similar to user_api.accounts..TestAccountsAPI but is limited to
    the `APPSEMBLER_MULTI_TENANT_EMAILS` feature.
"""
RED = 'red1'
BLUE = 'blue2'
AHMED_EMAIL = 'ahmedj@example.org'
JOHN_EMAIL = 'johnb@example.org'
PASSWORD = 'test_password'
def send_patch_email(self, user, new_email):
"""
Login and send PATCH request to change the email then logout.
"""
self.client.login(username=user.username, password=self.PASSWORD)
url = reverse('accounts_api', kwargs={'username': user.username})
patch_body = json.dumps({'email': new_email, 'goals': 'change my email'})
response = self.client.patch(url, patch_body, content_type='application/merge-patch+json')
self.client.logout()
return response
def assert_change_email(self, user, new_email):
"""
Assert a successful but PENDING email change.
"""
original_email = user.email
response = self.send_patch_email(user, new_email)
allow_change_email = 'Email change should be allowed: {}'.format(response.content)
assert response.status_code == status.HTTP_200_OK, allow_change_email
pending_changes = PendingEmailChange.objects.filter(user=user)
assert pending_changes.count() == 1, allow_change_email
user.refresh_from_db()
assert user.email == original_email, 'Should not change the email yet'
def assert_confirm_change(self, user, new_email):
        # Now call the method that will be invoked when the user clicks the activation key in the received email.
# First we must get the activation key that was sent.
pending_change = PendingEmailChange.objects.get(user=user)
activation_key = pending_change.activation_key
confirm_change_url = reverse(
'confirm_email_change', kwargs={'key': activation_key}
)
confirm_response = self.client.get(confirm_change_url)
assert confirm_response.status_code == status.HTTP_200_OK, confirm_response.content
user.refresh_from_db()
assert user.email == new_email, 'Should change the email successfully'
def test_change_email_success(self):
"""
Email change request is allowed regardless of APPSEMBLER_MULTI_TENANT_EMAILS.
"""
with with_organization_context(site_color=self.RED) as org:
red_ahmed = create_org_user(org, email=self.AHMED_EMAIL, password=self.PASSWORD)
new_email = 'another_email@example.com'
self.assert_change_email(red_ahmed, new_email)
self.assert_confirm_change(red_ahmed, new_email)
def test_change_email_disallow_duplicate(self):
"""
Ensure email reuse is not allowed within the organization regardless of APPSEMBLER_MULTI_TENANT_EMAILS.
"""
with with_organization_context(site_color=self.RED) as org:
red_ahmed = create_org_user(org, email=self.AHMED_EMAIL, password=self.PASSWORD)
red_john = create_org_user(org, email=self.JOHN_EMAIL, password=self.PASSWORD)
response = self.send_patch_email(red_ahmed, red_john.email)
assert response.status_code == status.HTTP_200_OK, '{}: {}'.format(
'Email reuse within the same organization should be disallowed',
response.content
)
pending_changes = PendingEmailChange.objects.filter(user=red_ahmed)
assert not pending_changes.count(), 'Email reuse within the same organization should be disallowed'
assert response.json()['email'] == self.AHMED_EMAIL, 'Should not change the email'
def test_change_email_success_multi_tenant(self):
"""
Email change allows emails in other organizations when APPSEMBLER_MULTI_TENANT_EMAILS is enabled.
Story:
- John registers for the Red Academy via his corporate email address.
- John registers for the Blue University via his Gmail email address.
- John decides to use his corporate email address on Blue University as well.
"""
john_email = 'johnb@gmail.com'
john_corp = 'johnb@corp.biz'
with with_organization_context(site_color=self.RED) as org:
# John registers for the Red Academy via his corporate email address.
red_john = create_org_user(org, email=john_corp, password=self.PASSWORD)
with with_organization_context(site_color=self.BLUE) as org:
# John registers for the Blue University via his Gmail email address.
            blue_john = create_org_user(org, email=john_email, password=self.PASSWORD)
# John decides to use his corporate email address on Blue University as well.
            self.assert_change_email(blue_john, red_john.email) # Use corporate email in another organization
            self.assert_confirm_change(blue_john, red_john.email) # Reuse John's corporate email in another organization
|
TriumphLLC/FashionProject
|
modules/operators/wires/basic.py
|
Python
|
gpl-3.0
| 916
| 0.010804
|
import bpy
# from math import pi, sin, cos
# from bpy_extras.view3d_utils import location_3d_to_region_2d
# from bgl import glEnable, glDisable, glBegin, glEnd, glVertex2f, glVertex3f, glColor4f, glLineWidth, GL_LINE_STRIP, GL_LINE_STIPPLE, GL_BLEND, GL_LINE_LOOP, GL_POLYGON
# from fashion_project.modules.utils import mouse, get_point_abs_location
# from fashion_project.modules.utils.mathlib import deg2rad
from .proto import FP_WireProto
class FP_BasicWire(bpy.types.Operator, FP_WireProto):
'''
    Operator that builds a guide wire
    between the selected point (or the coordinate origin)
    and the mouse cursor
'''
bl_idname = 'fp.draw_basic_wire'
bl_label = 'FP wire between selected point and mouse cursor'
def register():
bpy.utils.register_class(FP_BasicWire)
def unregister():
bpy.utils.unregister_class(FP_BasicWire)
|
morelab/weblabdeusto
|
server/src/weblab/exc.py
|
Python
|
bsd-2-clause
| 550
| 0.010929
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
class WebLabError(Exception):
    def __init__(self,*args,**kargs):
Exception.__init__(self,*args,**kargs)
|
tkerola/chainer
|
tests/chainerx_tests/unit_tests/routines_tests/test_indexing.py
|
Python
|
mit
| 8,519
| 0
|
import unittest
import numpy
import pytest
from chainerx_tests import array_utils
import chainer.testing
import chainerx
import chainerx.testing
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices', [
# empty indexing
((), ()),
((3,), ()),
((2, 2, 2), ()),
# integer indexing - non-tuple indexing
((3,), 0),
((3,), 1),
((3,), 2),
((3,), -1),
((2, 3), 0),
((2, 3), 1),
((2, 3), numpy.int8(-1)),
((2, 3), numpy.int32(0)),
((2, 3), numpy.uint64(1)),
    # integer indexing - tuple indexing
((3,), (0,)),
((3,), (1,)),
((3,), (2,)),
((3,), (-1,)),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (0, 0)),
((2, 3), (1, 1)),
((2, 3, 4), (0, -2, 3)),
((2, 3, 4), (1, 0)),
# slice indexing - non-tuple indexing
((3,), slice(None)),
((3,), slice(2)),
((3,), slice(0, 3)),
((3,), slice(0, 2)),
((3,), slice(1, 3)),
((3,), slice(0, 0)),
((3,), slice(0, 1)),
((3,), slice(2, 0, -1)),
((3,), slice(-2, -1)),
((3,), slice(2, None, -1)),
((3,), slice(None, 0, 1)),
((3,), slice(None, -1, -1)),
((3,), slice(None, -2, -1)),
((6,), slice(0, 6, 2)),
((6,), slice(1, 6, 2)),
((6,), slice(5, None, -2)),
# slice indexing - tuple indexing
((3,), (slice(None),)),
((3,), (slice(2),)),
((3,), (slice(0, 3),)),
((3,), (slice(0, 2),)),
((3,), (slice(1, 3),)),
((3,), (slice(0, 0),)),
((3,), (slice(0, 1),)),
((3,), (slice(2, 0, -1),)),
((3,), (slice(-2, -1),)),
((3,), (slice(2, None, -1),)),
((3,), (slice(None, 0, 1),)),
((3,), (slice(None, -1, -1),)),
((3,), (slice(None, -2, -1),)),
((6,), (slice(0, 6, 2),)),
((6,), (slice(1, 6, 2),)),
((6,), (slice(5, None, -2),)),
((6,), (slice(50, 1, -1),)),
((6,), (slice(3, 3, 1),)),
((6,), (slice(3, 3, -2),)),
((6,), (slice(50, 50, 1),)),
((6,), (slice(50, 50, -2),)),
((6,), (slice(-50, -50, 1),)),
((6,), (slice(-50, -50, -2),)),
((2, 3), (slice(None), slice(None))),
((2, 3), (slice(1), slice(2))),
((2, 3), (slice(0, 2), slice(0, 3))),
((2, 3), (slice(0, 2), slice(0, -1))),
((2, 3), (slice(0, None, -1), slice(2, 3))),
((2, 3), (slice(0, None, None), slice(-2, 0, -1))),
((2, 3), (slice(1, 2), slice(0, 2))),
    ((2, 3), (slice(-2, None, -1), slice(0, 3))),
((2, 3), (slice(-2, None, -1), slice(-3, None, -1))),
((2, 3), (slice(-2, None, -1), slice(None, None, -2))),
((2, 3), (slice(1, 2), slice(None, None, 1))),
((2, 3), (slice(1, 2), slice(None, None, 2))),
((2, 3, 4), (slice(1), slice(-2, 3), slice(1, None, -1))),
# newaxis indexing - non-tuple indexing
((), chainerx.newaxis),
((3,), chainerx.newaxis),
    # newaxis indexing - tuple indexing
((), (chainerx.newaxis,)),
((3,), (chainerx.newaxis,)),
((2, 3), (chainerx.newaxis, chainerx.newaxis)),
# mixed indexing - tuple indexing
((2, 3), (0, slice(1, 3))),
((4, 3), (slice(1, 3), 1)),
((2, 3, 4), (1, slice(2,), slice(1, 3))),
((2, 3), (1, chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), slice(1, 2), slice(1, 3), chainerx.newaxis)),
((2, 3, 4), (slice(0, 1), slice(1, 2), chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), chainerx.newaxis, slice(1, 2), slice(1, 3))),
((2, 3, 4), (chainerx.newaxis, slice(0, 1), slice(1, 2), slice(1, 3))),
((2, 3, 4),
(1, slice(2,), chainerx.newaxis, slice(1, 3), chainerx.newaxis)),
])
class TestGetitem(op_utils.NumpyOpTest):
# TODO(niboshi): Remove this
check_numpy_strides_compliance = False
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype('float32')
return x,
def forward_xp(self, inputs, xp):
x, = inputs
y = x[self.indices]
return y,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_getitem_zero_sized_offsets(device):
a = chainerx.arange(6)
b = a[3:3]
# Test pre-conditions.
assert b.size == 0
assert b.offset == 12
# The offset of `c` should be the same as `b` since `b` is empty.
c = b[2:]
assert c.size == 0
assert c.offset == b.offset
@op_utils.op_test(['native:0', 'cuda:0'])
# TODO(hvy): Add cases where axis=None, when supported.
@chainer.testing.parameterize_pytest('shape,indices,axis', [
# Valid parameters
((3,), [0], 0),
((3,), [1], 0),
((2, 3), [0], 0),
((2, 3), [0], 1),
((2, 3), [0], -1),
((2, 3), [1], 0),
((2, 3), [0, -1], 0),
((2, 3), [1, 0], 0),
((2, 3), [1, 2], 1),
((2, 3), [2, 1], 1),
((2, 3), [[0], [1]], 0),
# Invalid: Axis out of bounds
((2, 3), [0], 2),
((2, 3), [0], -3),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
@chainer.testing.parameterize_pytest(
'indices_type', ['list', 'numpy', 'xp'])
# TODO(niboshi): indices_dtype is ignored if indices_type == 'list', which is
# wasteful.
@chainer.testing.parameterize_pytest(
'indices_dtype', chainerx.testing.integral_dtypes)
class TestTake(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
forward_accept_errors = (chainerx.DimensionError, numpy.AxisError)
def setup(self):
if (numpy.dtype(self.indices_dtype).kind == 'u'
and (numpy.array(self.indices, 'int64') < 0).any()):
raise unittest.SkipTest(
'Indices underflows and index out of bounds cannot be tested.')
def generate_inputs(self):
a = numpy.random.uniform(-1, 1, self.shape).astype('float32')
return a,
def forward_xp(self, inputs, xp):
indices = self.indices
axis = self.axis
indices_type = self.indices_type
a, = inputs
assert isinstance(indices, list)
if indices_type == 'list':
pass
elif indices_type == 'numpy':
indices = numpy.array(indices).astype(self.indices_dtype)
elif indices_type == 'xp':
indices = xp.array(indices).astype(self.indices_dtype)
else:
assert False, indices_type
if self.is_module:
b = xp.take(a, indices, axis)
else:
b = a.take(indices, axis)
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('cond_shape,x_shape,y_shape', [
# Same Shapes
((2, 3), (2, 3), (2, 3)),
# Broadcast Shapes
((2, 3), (1, 3), (1, 3)),
((2, 3), (2, 1), (1, 3)),
((2, 3), (2, 3), (1, 3)),
((4, 5), (3, 4, 1), (1, 5)),
((1, 4, 5), (3, 4, 1), (3, 1, 5)),
])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_two_arrays
)
@chainer.testing.parameterize_pytest(
'condition_dtype', chainerx.testing.all_dtypes)
class TestWhere(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def setup(self):
(x_dtype, y_dtype) = self.in_dtypes
if numpy.dtype(x_dtype).kind != 'f' or \
numpy.dtype(y_dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if x_dtype == 'float16' or y_dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
(x_dtype, y_dtype) = self.in_dtypes
x = array_utils.uniform(self.x_shape, x_dtype)
y = array_utils.uniform(self.y_shape, y_dtype)
condition = numpy.random.uniform(0, 1, size=self.cond_shape)
self.condition = (condition > 0.5).astype(self.condition_dtype)
return (x, y)
def forward_xp(self, inputs, xp):
x, y = inputs
condition = xp.array(self.condition)
o = xp.where(condition, x, y)
o = dtype_utils.cast_if_numpy_array(xp, o, self.out_dtype)
return o,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('cond_shape,x_shape,y_shape', [
((2, 3), (3, 4), (2, 3)),
((2, 3), (2
|
editorsnotes/editorsnotes
|
editorsnotes/auth/utils.py
|
Python
|
agpl-3.0
| 1,133
| 0.000883
|
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.utils.http import urlsafe_base64_encode
def send_activation_email(request, user):
b64uid = urlsafe_base64_encode(str(user.id).encode())
token_generator = PasswordResetTokenGenerator()
token = token_generator.make_token(user)
if user.is_active:
raise Exception('Will not send activation key to active user')
send_mail(
        'Activate your Editors\' Notes account',
'This email was used to create an account at {site_url}.\n\n'
'To activate your account, visit the following link:\n\n'
'\t{activation_url}\n\n'
'If you did not request an account, please ignore this email.'.format(
site_url=request.build_absolute_uri('/'),
activation_url=request.build_absolute_uri(
reverse('auth:activate_account', args=[b64uid, token])
),
activation_token=token),
settings.SERVER_EMAIL,
[user.email]
)
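# --- Illustration (not part of the original module) ---
# send_activation_email only covers the sending side. A minimal sketch of the
# receiving side is shown below, assuming a conventional activation view; the
# function name and error handling are placeholders, not this project's code.
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.http import urlsafe_base64_decode

def activate_account(b64uid, token):
    """Hypothetical counterpart to send_activation_email: validate the link."""
    User = get_user_model()
    try:
        user = User.objects.get(id=urlsafe_base64_decode(b64uid).decode())
    except (User.DoesNotExist, ValueError):
        return None
    # check_token() verifies the signed token produced by make_token() above.
    if PasswordResetTokenGenerator().check_token(user, token):
        user.is_active = True
        user.save()
        return user
    return None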
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_skreeg_hue.py
|
Python
|
mit
| 576
| 0.041667
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_skreeg_hue.iff"
result.attribute_template_id = 9
result.stfName("monster_name","skreeg")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/player_pet.py")
result.options_mask = 0x100
result.pvp_status = PVPSTATUS.PvPStatus_None
#### END MODIFICATIONS ####
    return result
|
yukaritan/qtbot3
|
qtbot3_service/plugins/4chan.py
|
Python
|
gpl-3.0
| 1,490
| 0.004027
|
import re
import json
import requests
from util import irc
from util.handler_utils import cmdhook, authenticate, get_target
from qtbot3_common.types.message import Message
def scrape(board: str, filtertext: str):
try:
data = requests.get("http://boards.4chan.org/{board}/catalog".format(board=board)).text
        match = re.match(r".*var catalog = (?P<catalog>\{.*\});.*", data)
        if not match:
            print("Couldn't scrape catalog")
            return
        catalog = json.loads(match.group('catalog'))
for number, thread in catalog['threads'].items():
sub, teaser = thread['sub'], thread['teaser']
if filtertext in sub.lower() or filtertext in teaser.lower():
yield(number, thread)
except Exception as ex:
print("scraping exception:", ex)
@cmdhook('4chan (?P<board>[^\s]+) (?P<filtertext>.+)')
@authenticate
def handle_scrape(message: Message, match, nick: str):
board = match['board']
filtertext = match['filtertext']
print("searching 4chan's {board} board for {filtertext}...".format(**match))
baseurl = "http://boards.4chan.org/{board}/thread/{number}/{semantic_url}"
lines = []
for number, thread in scrape(board, filtertext):
title = (thread['sub'] + ': ' + baseurl).format(number=number, board=board, **thread)
lines.append(title + ' - ' + thread['teaser'])
    target = get_target(message, nick)
    return [irc.chat_message(target, line) for line in lines[:3]]
|
Aeva/voice
|
voice/engines/__init__.py
|
Python
|
gpl-3.0
| 650
| 0
|
# This file is part of Voice
#
# Voice is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Voice is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Voice. If not, see <http://www.gnu.org/licenses/>.
|
toni-heittola/pelican-btex
|
scholar/__init__.py
|
Python
|
mit
| 23
| 0
|
from .scholar import *
|
dc3-plaso/plaso
|
tests/output/xlsx.py
|
Python
|
apache-2.0
| 5,608
| 0.010342
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the XLSX output module."""
import os
import unittest
import zipfile
from xml.etree import ElementTree
from plaso.containers import events
from plaso.formatters import interface as formatters_interface
from plaso.formatters import manager as formatters_manager
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.output import xlsx
from tests import test_lib as shared_test_lib
from tests.output import test_lib
class TestEvent(events.EventObject):
"""Event object used for testing."""
DATA_TYPE = u'test:xlsx'
def __init__(self):
"""Initializes an event object used for testing."""
super(TestEvent, self).__init__()
self.timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')
self.timestamp_desc = eventdata.EventTimestamp.CHANGE_TIME
self.hostname = u'ubuntu'
self.filename = u'log/syslog.1'
self.text = (
u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n '
u'closed for user root) Invalid character -> \ud801')
class TestEventFormatter(formatters_interface.EventFormatter):
"""Event object formatter used for testing."""
DATA_TYPE = u'test:xlsx'
FORMAT_STRING = u'{text}'
SOURCE_SHORT = u'LOG'
SOURCE_LONG = u'Syslog'
class XLSXOutputModuleTest(test_lib.OutputModuleTestCase):
"""Test the XLSX output module."""
_SHARED_STRINGS = u'xl/sharedStrings.xml'
_SHEET1 = u'xl/worksheets/sheet1.xml'
_COLUMN_TAG = u'}c'
_ROW_TAG = u'}row'
_SHARED_STRING_TAG = u'}t'
_SHARED_STRING_TYPE = u's'
_TYPE_ATTRIBUTE = u't'
_VALUE_STRING_TAG = u'}v'
def _GetSheetRows(self, filename):
"""Parses the contents of the first sheet of an XLSX document.
Args:
filename: The file path of the XLSX document to parse.
Returns:
A list of dictionaries representing the rows and columns of the first
sheet.
"""
zip_file = zipfile.ZipFile(filename)
# Fail if we can't find the expected first sheet.
if self._SHEET1 not in zip_file.namelist():
raise ValueError(
u'Unable to locate expected sheet: {0:s}'.format(self._SHEET1))
# Generate a reference table of shared strings if available.
strings = []
if self._SHARED_STRINGS in zip_file.namelist():
zip_file_object = zip_file.open(self._SHARED_STRINGS)
for _, element in ElementTree.iterparse(zip_file_object):
if element.tag.endswith(self._SHARED_STRING_TAG):
strings.append(element.text)
row = []
rows = []
value = u''
zip_file_object = zip_file.open(self._SHEET1)
for _, element in ElementTree.iterparse(zip_file_object):
if (element.tag.endswith(self._VALUE_STRING_TAG) or
element.tag.endswith(self._SHARED_STRING_TAG)):
value = element.text
if element.tag.endswith(self._COLUMN_TAG):
# Grab value from shared string reference table if type shared string.
if (strings and element.attrib.get(self._TYPE_ATTRIBUTE) ==
self._SHARED_STRING_TYPE):
try:
value = strings[int(value)]
except (IndexError, ValueError):
raise ValueError(
u'Unable to successfully dereference shared string.')
row.append(value)
# If we see the end tag of the row, record row in rows and reset.
if element.tag.endswith(self._ROW_TAG):
rows.append(row)
row = []
return rows
def testWriteEventBody(self):
"""Tests the WriteHeader function."""
formatters_manager.FormattersManager.RegisterFormatter(TestEventFormatter)
expected_header = [
u'datetime', u'timestamp_desc', u'source', u'source_long',
u'message', u'parser', u'display_name', u'tag']
expected_event_body = [
u'41087.76181712963', u'Metadata Modification Time', u'LOG', u'Syslog',
u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session '
u'closed for user root) Invalid character -> \ufffd',
u'-', u'-', u'-']
with shared_test_lib.TempDirectory() as temp_directory:
output_mediator = self._CreateOutputMediator()
output_module = xlsx.XLSXOutputModule(output_mediator)
xslx_file = os.path.join(temp_directory, u'xlsx.out')
output_module.SetFilename(xslx_file)
output_module.Open()
output_module.WriteHeader()
output_module.WriteEvent(TestEvent())
output_module.WriteFooter()
output_module.Close()
try:
rows = self._GetSheetRows(xslx_file)
except ValueError as exception:
self.fail(exception)
self.assertEqual(expected_header, rows[0])
self.assertEqual(len(expected_event_body), len(rows[1]))
self.assertEqual(expected_event_body, rows[1])
def testWriteHeader(self):
"""Tests the WriteHeader function."""
expected_header = [
u'datetime', u'timestamp_desc', u'source', u'source_long',
u'message', u'parser', u'display_name', u'tag']
with shared_test_lib.TempDirectory() as temp_directory:
output_mediator = self._CreateOutputMediator()
output_module = xlsx.XLSXOutputModule(output_mediator)
xlsx_file = os.path.join(temp_directory, u'xlsx.out')
output_module.SetFilename(xlsx_file)
output_module.Open()
output_module.WriteHeader()
output_module.WriteFooter()
output_module.Close()
try:
rows = self._GetSheetRows(xlsx_file)
except ValueError as exception:
self.fail(exception)
self.assertEqual(expected_header, rows[0])
if __name__ == u'__main__':
unittest.main()
|
Enjoying-Learning/Enjoying
|
docs/config/jupyter_notebook_config.py
|
Python
|
mit
| 22,055
| 0.002222
|
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# The full path to a certificate authority certifificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = ''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly use during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-
|
ajosephau/psma-gnaf-loader
|
main.py
|
Python
|
gpl-3.0
| 6,393
| 0.003441
|
import logging, os
import psycopg2
# settings
database_name = 'postgres_database'
user = 'postgres_user'
password = 'some_password_here_lol'
port = 5432
host = 'postgres_host_normally_localhost'
path_to_gnaf_data = '/path/to/gnaf/data/'
# setup
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)
def get_folder_path(support_text, absolute_path, search_path, search_name, test_name):
if not search_path and search_name in test_name:
logging.debug(support_text + absolute_path)
return absolute_path
else:
return search_path
def load_sql_file_into_db(file_path):
file_ref = open(file_path, "r").read()
db_cursor.execute(file_ref)
db_connection.commit()
try:
db_connection = psycopg2.connect(database=database_name, user=user, password=password, host=host, port=port)
db_cursor = db_connection.cursor()
logging.info("Step 0 of 5 : Bootstrapping started...")
gnaf_parent_path = ''
extras_path = ''
table_creation_scripts_path = ''
example_view_creation_scripts_path = ''
table_creation_script_path = ''
foreign_key_script_path = ''
example_view_script_path = ''
authority_code_path = ''
standard_data_path = ''
gnaf_name = 'G-NAF '
table_creation_script_folder_name = 'GNAF_TableCreation_Scripts'
table_creation_script_name = 'create_tables_ansi.sql'
foreign_key_script_name = 'add_fk_constraints.sql'
authority_code_name = 'Authority Code'
standard_data_name = 'Standard'
psv_file_suffix = "_psv.psv"
views_script_folder_name = 'GNAF_View_Scripts'
example_view_script_name = 'address_view.sql'
SQL_STATEMENT = """ COPY %s FROM STDIN WITH CSV HEADER DELIMITER AS '|'"""
# find sub folders needed
for dirname, dirnames, filenames in os.walk(path_to_gnaf_data):
for subdirname in dirnames:
absolute_path = os.path.join(dirname, subdirname)
gnaf_parent_path = get_folder_path("G-NAF parent folder: ", absolute_path, gnaf_parent_path, gnaf_name, subdirname)
table_creation_scripts_path = get_folder_path("Table creation scripts folder: ", absolute_path, table_creation_scripts_path, table_creation_script_folder_name, subdirname)
example_view_creation_scripts_path = get_folder_path("Example View creation scripts folder: ", absolute_path, example_view_creation_scripts_path, views_script_folder_name, subdirname)
authority_code_path = get_folder_path("Authority Code folder: ", absolute_path, authority_code_path, authority_code_name, subdirname)
standard_data_path = get_folder_path("Standard data folder: ", absolute_path, standard_data_path, standard_data_name, subdirname)
# find table/fk creation scripts
for dirname, dirnames, filenames in os.walk(table_creation_scripts_path):
for filename in filenames:
absolute_path = os.path.join(table_creation_scripts_path, filename)
if not table_creation_script_path and table_creation_script_name in filename:
table_creation_script_path = absolute_path
logging.debug("Table creation script: " + table_creation_script_path)
if not foreign_key_script_path and foreign_key_script_name in filename:
foreign_key_script_path = absolute_path
logging.debug("Foreign key script: " + foreign_key_script_path)
# find views creation script
for dirname, dirnames, filenames in os.walk(example_view_creation_scripts_path):
for filename in filenames:
absolute_path = os.path.join(example_view_creation_scripts_path, filename)
if not example_view_script_path and example_view_script_name in filename:
example_view_script_path = absolute_path
logging.debug("Example views script: " + example_view_script_path)
logging.info("Step 0 of 5 : Bootstrapping finished!")
logging.info("Step 1 of 5 : Creating Schema started...")
load_sql_file_into_db(table_creation_script_path)
logging.info("Step 1 of 5 : Creating Schema finished!")
logging.info("Step 2 of 5 : Loading Authority Code data started...")
for dirname, dirnames, filenames in os.walk(authority_code_path):
num_files = str(len(filenames))
for index, filename in enumerate(filenames):
absolute_path = os.path.join(authority_code_path, filename)
authority_code_prefix = "Authority_Code_"
authority_code_suffix = psv_file_suffix
table_name = filename.replace(authority_code_prefix, "")
table_name = table_name.replace(authority_code_suffix, "")
logging.info("Importing file " + str(index + 1) + " of " + num_files + ": " + filename + " -> " + table_name)
db_cursor.copy_expert(sql=SQL_STATEMENT % table_name, file=open(absolute_path))
db_connection.commit()
logging.info("Step 2 of 5 : Loading Authority Code data finished!")
logging.info("Step 3 of 5 : Loading Standard data started...")
for dirname, dirnames, filenames in os.walk(standard_data_path):
num_files = str(len(filenames))
        for index, filename in enumerate(filenames):
            absolute_path = os.path.join(standard_data_path, filename)
standard_data_suffix = psv_file_suffix
table_name = filename.split('_', 1)[-1]
table_name = table_name.replace(standard_data_suffix, "")
logging.info("Importing file " + str(index + 1) + " of " + num_files + ": " + filename + " -> " + table_name)
db_cursor.copy_expert(sql=SQL_STATEMENT % table_name, file=open(absolute_path))
db_connection.commit()
logging.info("Step 3 of 5 : Loading Standard data finished!")
logging.info("Step 4 of 5 : Creating Foreign Key relationships creation started...")
load_sql_file_into_db(foreign_key_script_path)
logging.info("Step 4 of 5 : Creating Foreign Key relationships creation finished!")
logging.info("Step 5 of 5 : Creating example views creation started...")
load_sql_file_into_db(example_view_script_path)
logging.info("Step 5 of 5 : Creating example views creation finished!")
db_cursor.close()
db_connection.close()
except Exception as exception:
logging.error("Exception occurred: " + str(exception))
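# --- Illustration (not part of the original script) ---
# The loader above relies on psycopg2's copy_expert to stream each PSV file
# through a server-side COPY. A standalone sketch of that pattern follows; the
# connection parameters, table name and file path are placeholders.
import psycopg2

conn = psycopg2.connect(database='gnaf', user='postgres', password='secret',
                        host='localhost', port=5432)
cur = conn.cursor()
copy_sql = """ COPY address_detail FROM STDIN WITH CSV HEADER DELIMITER AS '|'"""
with open('/tmp/ADDRESS_DETAIL_psv.psv') as psv_file:
    # copy_expert streams the open file object into the COPY command.
    cur.copy_expert(sql=copy_sql, file=psv_file)
conn.commit()
cur.close()
conn.close()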
|
mitodl/lore
|
importer/tasks.py
|
Python
|
agpl-3.0
| 2,045
| 0.001956
|
"""
Celery tasks for import module.
"""
from __future__ import unicode_literals
import json
import logging
from django.conf import settings
import requests
from statsd.defaults.django import statsd
from lore.celery import async
from learningresources.api import update_xanalytics
from xanalytics import send_request, get_result
log = logging.getLogger(__name__)
RETRY_LIMIT = 10
@async.task
@statsd.timer('lore.import_file')
def import_file(path, repo_id, user_id):
"""Asynchronously import a course."""
from importer.api import import_course_from_file
import_course_from_file(path, repo_id, user_id)
@async.task
def populate_xanalytics_fields(course_id):
"""
Initiate request to xanalytics API to get stats for a course,
then trigger async job to retrieve results when they become available.
Args:
course_id (int): primary key of a Course
"""
if settings.XANALYTICS_URL != "":
        token = send_request(settings.XANALYTICS_URL + "/create", course_id)
        check_for_results.apply_async(
            kwargs={"token": token, "wait": 5, "attempt": 1}, countdown=5)
@async.task
def check_for_results(token, wait, attempt):
"""
Check for xanalytics results for a course.
Args:
token (string): Token received from xanalytics server.
        wait (int): Seconds to wait before checking again.
attempt (int): Attempt number, so we don't try forever.
"""
resp = get_result(settings.XANALYTICS_URL + "/status", token)
if resp["status"] == "still busy" and attempt < RETRY_LIMIT:
attempt += 1
wait *= 2
check_for_results.apply_async(
kwargs={"token": token, "wait": wait, "attempt": attempt},
countdown=wait,
)
if resp["status"] == "complete":
content = requests.get(resp["url"]).content
try:
data = json.loads(content)
update_xanalytics(data)
except (ValueError, TypeError):
log.error("Unable to parse xanalytics response: %s", content)
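# --- Illustration (not part of the original module) ---
# check_for_results polls with exponential backoff: the countdown doubles on
# every "still busy" response (5, 10, 20, ... seconds) until RETRY_LIMIT
# attempts have been made. The same schedule in plain synchronous Python, with
# fetch_status standing in for the xanalytics status call (a placeholder):
import time

def poll_with_backoff(fetch_status, wait=5, attempt=1, retry_limit=RETRY_LIMIT):
    """Synchronous analogue of check_for_results' retry schedule."""
    while True:
        status = fetch_status()
        if status == "complete":
            return True
        if status != "still busy" or attempt >= retry_limit:
            return False
        time.sleep(wait)
        wait *= 2
        attempt += 1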
|
cepheidxa/python
|
selinux_check/test_type.py
|
Python
|
gpl-3.0
| 4,408
| 0.002949
|
import unittest
from type import Type
class TestType(unittest.TestCase):
def setUp(self):
self.__statement1 = [
'type name, attri1;',
' type name,attri1 ;',
'type name , attri1; ',
' type name ,attri1 ; ',
]
self.__statement2 = [
'type name, attri1, attri2;',
' type name,attri1,attri2 ;',
'type name, attri1 ,attri2; ',
' type name ,attri1, attri2 ; ',
]
self.__statement3 = [
'type name, attri1, attri2, attri3;',
' type name,attri1,attri2,attri3 ;',
'type name ,attri1, attri2 ,attri3; ',
' type name, attri1 ,attri2, attri3 ; ',
]
self.__err_statements = [
'type name, attri1',
'type name,, attri1;',
'name, attri1',
'type name attri1;',
'type, name attri1;',
'type name, attri1, attri2',
'type name, attri1 attri2;',
'type name,, attri1, attri2;',
'type, name, attri1 attri2;',
'name, attri1, attri2',
'type name, attri1, attri2, attri3',
'name, attri1, attri2, attri3',
'type name, attri1, attri2 attri3;',
'type, name, attri1, attri2 attri3;',
'type name, attri1, attri2,, attri3;',
]
def test_statement_error(self):
for statement in self.__err_statements:
self.assertRaises(ValueError, Type, statement)
def test_type(self):
for statement in self.__statement1:
t = Type(statement)
self.assertEqual(t.getName(), 'name')
self.assertEqual(t.getAttributes(), ['attri1'])
for statement in self.__statement2:
t = Type(statement)
self.assertEqual(t.getName(), 'name')
self.assertEqual(t.getAttributes(), ['attri1', 'attri2'])
for statement in self.__statement3:
t = Type(statement)
self.assertEqual(t.getName(), 'name')
self.assertEqual(t.getAttributes(), ['attri1', 'attri2', 'attri3'])
def test_dump_statement(self):
for statement in self.__statement1:
t = Type(statement)
            self.assertEqual(t.dump_statement(), 'type name, attri1;')
for statement in self.__statement2:
t = Type(statement)
self.assertEqual(t.dump_statement(), 'type name, attri1, attri2;')
for statement in self.__statement3:
t = Type(statement)
self.assertEqual(t.dump_statement(), 'type name, attri1, attri2, attri3;')
def test_eq(self):
t1 = Type(self.__statement1[0])
for statement in self.__statement1:
t = Type(statement)
self.assertEqual(t, t1)
for statement in self.__statement2:
t = Type(statement)
self.assertEqual(t, t1)
for statement in self.__statement3:
t = Type(statement)
self.assertEqual(t, t1)
self.assertFalse(t1 == 'name')
t2 = Type('type name2, attri1, attri2;')
for statement in self.__statement1:
t = Type(statement)
self.assertTrue(t != t2)
for statement in self.__statement2:
t = Type(statement)
self.assertTrue(t != t2)
for statement in self.__statement3:
t = Type(statement)
self.assertTrue(t != t2)
def test_hasAttribute(self):
for statement in self.__statement1:
t = Type(statement)
self.assertTrue(t.hasAttribute('attri1'))
self.assertFalse(t.hasAttribute('attri2'))
self.assertFalse(t.hasAttribute('attri3'))
for statement in self.__statement2:
t = Type(statement)
self.assertTrue(t.hasAttribute('attri1'))
self.assertTrue(t.hasAttribute('attri2'))
self.assertFalse(t.hasAttribute('attri3'))
for statement in self.__statement3:
t = Type(statement)
self.assertTrue(t.hasAttribute('attri1'))
self.assertTrue(t.hasAttribute('attri2'))
self.assertTrue(t.hasAttribute('attri3'))
self.assertFalse(t.hasAttribute('attri4'))
if __name__ == '__main__':
unittest.main()
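# --- Illustration (not part of the original test module) ---
# selinux_check/type.py itself is not included in this dump. Purely to show
# what the tests above exercise, a minimal implementation that would satisfy
# them could look like the sketch below; it is an assumption, not the
# project's actual Type class.
import re

class _SketchType:
    _IDENT = re.compile(r'^\w+$')

    def __init__(self, statement):
        # A statement must look like "type <name>, <attr>[, <attr>...];".
        match = re.match(r'^type\s+(.+);$', statement.strip())
        if not match:
            raise ValueError(statement)
        parts = [p.strip() for p in match.group(1).split(',')]
        if len(parts) < 2 or not all(self._IDENT.match(p) for p in parts):
            raise ValueError(statement)
        self._name = parts[0]
        self._attributes = parts[1:]

    def getName(self):
        return self._name

    def getAttributes(self):
        return self._attributes

    def hasAttribute(self, attribute):
        return attribute in self._attributes

    def dump_statement(self):
        return 'type {0}, {1};'.format(self._name, ', '.join(self._attributes))

    def __eq__(self, other):
        # test_eq compares instances by name only, so attributes are ignored.
        return isinstance(other, _SketchType) and self._name == other._name

    def __ne__(self, other):
        return not self.__eq__(other)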
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
Python
|
apache-2.0
| 106,863
| 0.011922
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training.checkpointable import util as checkpointable
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
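# lookup() preserves the shape of its key tensor, so 2-D keys yield a 2-D result.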
def testHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
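# The initializer also accepts plain Python lists; value_dtype pins the values to int64.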
def testHashTableInitWithPythonArrays(self):
with self.cached_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.cached_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
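# A single tables_initializer() op initializes every table in the graph, and independent tables can be looked up within one session run.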
def testMultipleHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
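# Looking up a SparseTensor maps only its values; the indices and dense shape pass through unchanged.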
def testHashTableWithSparseTensorInput(self):
with self.cached_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
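# Lookups with keys of the wrong dtype, and default values of the wrong dtype, both raise TypeError; reference-typed (Variable) string keys are still accepted.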
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.cached_session():
default_val = -1
with self.assertRaises(TypeError):
googlearchive/appengine-sqlite-guestbook-python
main.py
Python
apache-2.0
5,760
0.000174
"""A guestbook sample with sqlite3."""
import logging
import os
import jinja2
import sqlite3
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
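# The SQLite file lives on the instance's local disk, so every VM instance keeps its own guestbook database; the front page therefore lists a guestbook URL per active instance.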
DB_FILENAME = os.path.join('/tmp', 'guestbook.sqlite')
CREATE_TABLE_SQL = """\
CREATE TABLE IF NOT EXISTS guestbook
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR, content VARCHAR)"""
SELECT_SQL = 'SELECT * FROM guestbook ORDER BY id DESC LIMIT {}'
INSERT_SQL = 'INSERT INTO guestbook (name, content) VALUES (?, ?)'
POST_PER_PAGE = 20
def shutdown_hook():
"
|
""A hook function for de-registering myself."""
logging.info('shutdown_hook called.')
instance_id = modules.get_current_instance_id()
ndb.transaction(
lambda: ActiveServer.get_instance_key(instance_id).delete())
def get_connection():
"""A function to get sqlite connection.
Returns:
An sqlite connection object.
"""
logging.info('Opening a sqlite db.')
return sqlite3.connect(DB_FILENAME)
def get_url_for_instance(instance_id):
"""Return a full url of the guestbook running on a particular instance.
Args:
instance_id: A string to represent a VM instance.
Returns:
URL string for the guestbook form on the instance.
"""
hostname = app_identity.get_default_version_hostname()
return 'https://{}-dot-{}-dot-{}/guestbook'.format(
instance_id, modules.get_current_version_name(), hostname)
def get_signin_navigation(original_url):
"""Return a pair of a link text and a link for sign in/out operation.
Args:
An original URL.
Returns:
Two value tuple; a url and a link text.
"""
if users.get_current_user():
url = users.create_logout_url(original_url)
url_linktext = 'Logout'
else:
url = users.create_login_url(original_url)
url_linktext = 'Login'
return url, url_linktext
class ActiveServer(ndb.Model):
"""A model to store active servers.
We use the instance id as the key name, and there are no properties.
"""
@classmethod
def get_instance_key(cls, instance_id):
"""Return a key for the given instance_id.
Args:
An instance id for the server.
Returns:
A Key object which has a common parent key with the name 'Root'.
"""
return ndb.Key(cls, 'Root', cls, instance_id)
class ListServers(webapp2.RequestHandler):
"""A handler for listing active servers."""
def get(self):
"""A get handler for listing active servers."""
key = ndb.Key(ActiveServer, 'Root')
query = ActiveServer.query(ancestor=key)
servers = []
for key in query.iter(keys_only=True):
instance_id = key.string_id()
servers.append((instance_id, get_url_for_instance(instance_id)))
template = JINJA_ENVIRONMENT.get_template('index.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.out.write(template.render(servers=servers,
url=url,
url_linktext=url_linktext))
class MainPage(webapp2.RequestHandler):
"""A handler for showing the guestbook form."""
def get(self):
"""Guestbook main page."""
con = get_connection()
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute(SELECT_SQL.format(POST_PER_PAGE))
greetings = cur.fetchall()
con.close()
template = JINJA_ENVIRONMENT.get_template('guestbook.html')
url, url_linktext = get_signin_navigation(self.request.uri)
self.response.write(template.render(greetings=greetings,
url=url,
url_linktext=url_linktext))
class Guestbook(webapp2.RequestHandler):
"""A handler for storing a message."""
def post(self):
"""A handler for storing a message."""
author = ''
if users.get_current_user():
author = users.get_current_user().nickname()
con = get_connection()
with con:
con.execute(INSERT_SQL, (author, self.request.get('content')))
self.redirect('/guestbook')
class Start(webapp2.RequestHandler):
"""A handler for /_ah/start."""
def get(self):
"""A handler for /_ah/start, registering myself."""
runtime.set_shutdown_hook(shutdown_hook)
con = get_connection()
with con:
con.execute(CREATE_TABLE_SQL)
instance_id = modules.get_current_instance_id()
server = ActiveServer(key=ActiveServer.get_instance_key(instance_id))
server.put()
class Stop(webapp2.RequestHandler):
"""A handler for /_ah/stop."""
def get(self):
"""Just call shutdown_hook now for a temporary workaround.
With the initial version of the VM Runtime, a call to
/_ah/stop hits this handler, without invoking the shutdown
hook we registered in the start handler. We're working on a
fix to make the behavior consistent with the traditional
App Engine backends. Once that fix is out, this stop handler
won't be necessary any more.
"""
shutdown_hook()
APPLICATION = webapp2.WSGIApplication([
('/', ListServers),
('/guestbook', MainPage),
('/sign', Guestbook),
('/_ah/start', Start),
('/_ah/stop', Stop),
], debug=True)
|
GarmanGroup/RABDAM
|
rabdam/Subroutines/CalculateBDamage.py
|
Python
|
lgpl-3.0
| 35,945
| 0.001641
|
# RABDAM
# Copyright (C) 2020 Garman Group, University of Oxford
# This file is part of RABDAM.
# RABDAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# RABDAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
class rabdam(object):
def __init__(self, pathToInput, outputDir, batchRun, overwrite, PDT,
windowSize, protOrNA, HETATM, removeAtoms, addAtoms,
highlightAtoms, createOrigpdb, createAUpdb, createUCpdb,
createAUCpdb, createTApdb):
self.pathToInput = pathToInput
self.outputDir = outputDir
self.batchRun = batchRun
self.overwrite = overwrite
self.PDT = PDT
self.windowSize = windowSize
self.protOrNA = protOrNA
self.HETATM = HETATM
self.removeAtoms = removeAtoms
self.addAtoms = addAtoms
self.highlightAtoms = highlightAtoms
self.createOrigpdb = createOrigpdb
self.createAUpdb = createAUpdb
self.createUCpdb = createUCpdb
self.createAUCpdb = createAUCpdb
self.createTApdb = createTApdb
def rabdam_dataframe(self, test=False):
"""
Calculates BDamage for selected atoms within input PDB file and
writes output to DataFrame.
"""
prompt = '> '
import sys
import os
import shutil
import numpy as np
import pickle
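# raw_input was renamed to input in Python 3; pick whichever exists so the interactive prompts below work under both interpreters.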
if sys.version_info[0] < 3:
user_input = raw_input
else:
user_input = input
if __name__ == 'Subroutines.CalculateBDamage':
from Subroutines.PDBCUR import (
parse_mmcif_file, parse_pdb_file, clean_atom_rec, genPDBCURinputs,
runPDBCUR
)
from Subroutines.parsePDB import (
download_mmcif, copy_input, full_atom_list, b_damage_atom_list
)
from Subroutines.translateUnitCell import (
convertToCartesian, translateUnitCell, extract_unit_cell_params
)
from Subroutines.trimUnitCellAssembly import (
getAUparams, convertParams, trimAtoms
)
from Subroutines.makeDataFrame import (
convert_array_to_atom_list, makePDB, writeDataFrame
)
from Subroutines.BDamage import (
get_xyz_from_objects, calc_packing_density,
write_pckg_dens_to_atoms, calcBDam
)
else:
from rabdam.Subroutines.PDBCUR import (
parse_mmcif_file, parse_pdb_file, clean_atom_rec, genPDBCURinputs,
runPDBCUR
)
from rabdam.Subroutines.parsePDB import (
download_mmcif, copy_input, full_atom_list, b_damage_atom_list
)
from rabdam.Subroutines.translateUnitCell import (
convertToCartesian, translateUnitCell, extract_unit_cell_params
)
from rabdam.Subroutines.trimUnitCellAssembly import (
getAUparams, convertParams, trimAtoms
)
from rabdam.Subroutines.makeDataFrame import (
convert_array_to_atom_list, makePDB, writeDataFrame
)
from rabdam.Subroutines.BDamage import (
get_xyz_from_objects, calc_packing_density,
write_pckg_dens_to_atoms, calcBDam
)
print('**************************** RABDAM ****************************\n')
print('\n****************************************************************\n'
'***************** Program to calculate BDamage *****************\n'
'****************************************************************\n')
print('****************************************************************\n'
'************************* Input Section ************************\n')
# Prints the values of the program options to be used in the current
# RABDAM run
print('Calculating BDamage for %s' % self.pathToInput)
print('Writing output files to %s' % self.outputDir)
if self.PDT == 7:
print('Using default packing density threshold of 7 Angstroms')
else:
print('Packing density threshold defined by user as %s Angstroms' % self.PDT)
if self.windowSize == 0.02:
print('Using default window size of 2%')
else:
print('Window size defined by user as %s%%' % (self.windowSize*100))
if self.HETATM is True:
print('Keeping HETATM')
elif self.HETATM is False:
print('Removing HETATM (default)')
if self.protOrNA == 'protein':
print('Retaining protein atoms, discarding nucleic acid atoms (default)')
elif self.protOrNA in ['nucleicacid', 'na']:
print('Retaining nucleic acid atoms, discarding protein atoms')
if len(self.removeAtoms) == 0:
print('No atoms to be removed (default)')
else:
remove_atoms_string = ''
for value in self.removeAtoms:
remove_atoms_string = remove_atoms_string + value + ', '
print('Atoms to be removed: %s' % remove_atoms_string[:-2])
if len(self.addAtoms) == 0:
print('No atoms to be added (default)')
else:
add_atoms_string = ''
for value in self.addAtoms:
add_atoms_string = add_atoms_string + value + ', '
print('Atoms to be added: %s' % add_atoms_string[:-2])
print('\n********************* End of Input Section *********************\n'
'****************************************************************\n')
# Changes directory to the specified location for the output 'Logfiles'
# directory. The default location is the current working directory.
cwd = os.getcwd()
os.chdir(self.outputDir)
print('****************************************************************\n'
'********************** Process PDB Section *********************\n')
# Creates a new directory named after the input PDB file (after
# checking that this directory does not already exist) in the
# 'Logfiles' directory. Then saves a copy of the input PDB file to the
# new directory.
# If 4 digit PDB accession code has been supplied:
if len(self.pathToInput) == 4:
print('Accession code supplied')
PDBcode = self.pathToInput.upper()
PDBdirectory = 'Logfiles/%s/' % PDBcode
file_name_start = '%s%s' % (PDBdirectory, PDBcode)
pathToInput = '%s%s.cif' % (PDBdirectory, PDBcode)
# If directory with same name as PDBdirectory already exists in
# 'Logfiles' directory, user input is requested ('Do you want to
# overwrite the existing folder?'):
# yes = old PDBdirectory is deleted, new PDBdirectory is created
# and copy of the mmCIF file is downloaded from the RCSB PDB
# website and saved to the new directory
# no = old PDBdirectory is retained, exit program
# If it doesn't already exist, new PDBdirectory is created and
# copy of the mmCIF file is downloaded from the RCSB PDB website
# and saved to the new directory.
if os.path.isdir(PDBdirectory):
print('\nFolder %s already exists locally at %s' % (
PDBcode, PDBdirectory
))
print('Do you
ed-/solum
solum/objects/sqlalchemy/infrastructure_stack.py
Python
apache-2.0
1,686
0
# Copyright 2014 - Numergy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from solum.objects import infrastructure_stack as abstract
from solum.objects.sqlalchemy import models as sql
class InfrastructureStack(sql.Base, abstract.InfrastructureStack):
"""Represent an infrastructure_stack in sqlalchemy."""
__tablename__ = 'infrastructure_stack'
__resource__ = 'infrastructure/stacks'
__table_args__ = sql.table_args()
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), nullable=False)
project_id = sa.Column(sa.String(36))
user_id = sa.Column(sa.String(36))
image_id = sa.Column(sa.String(36))
heat_stack_id = sa.Column(sa.String(36))
name = sa.Column(sa.String(100))
description = sa.Column(sa.String(255))
tags = sa.Column(sa.Text)
class InfrastructureStackList(abstract.InfrastructureStackList):
"""Represent a list of infrastructure_stacks in sqlalchemy."""
@classmethod
def get_all(cls, context):
return InfrastructureStackList(sql.model_query(context,
InfrastructureStack))
TomBaxter/waterbutler
tests/server/api/v0/test_zip.py
Python
apache-2.0
919
0.001088
import io
import zipfile
from tornado import testing
from waterbutler.core import streams
from waterbutler.core.utils import AsyncIterator
from tests import utils
class TestZipHandler(utils.HandlerTestCase):
HOOK_PATH = 'waterbutler.server.api.v0.zip.ZipHandler._send_hook'
@testing.gen_test
def test_download_stream(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.content_type = 'application/octet-stream'
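# Wrap a single in-memory file in a zip stream, have the mocked provider return it, and check that the HTTP response unzips back to the original bytes.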
zipstream = streams.ZipStreamReader(AsyncIterator([('file.txt', stream)]))
self.mock_provider.zip = utils.MockCoroutine(return_value=zipstream)
resp = yield self.http_client.fetch(
self.get_url('/zip?provider=queenhub&path=/freddie.png'),
)
zip = zipfile.ZipFile(io.BytesIO(resp.body))
assert zip.testzip() is None
assert zip.open('file.txt').read() == data
raildo/nova
nova/tests/functional/api_sample_tests/test_floating_ips_bulk.py
Python
apache-2.0
3,805
0.000526
# Copyright 2014 IBM Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import context
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-floating-ips-bulk"
def _get_flags(self):
f = super(FloatingIpsBulkTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.floating_ips_bulk.Floating_ips_bulk')
return f
def setUp(self):
super(FloatingIpsBulkTest, self).setUp()
pool = CONF.default_floating_pool
interface = CONF.public_interface
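# Seed the database with three floating IPs, one pinned to 'testHost', so the list and list-by-host samples have data to verify against.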
self.ip_pool = [
{
'address': "10.10.10.1",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.2",
'pool': pool,
'interface': interface,
'host': None
},
{
'address': "10.10.10.3",
'pool': pool,
'interface': interface,
'host': "testHost"
},
]
self.compute.db.floating_ip_bulk_create(
context.get_admin_context(), self.ip_pool)
self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
context.get_admin_context(), self.ip_pool)
def test_floating_ips_bulk_list(self):
response = self._do_get('os-floating-ips-bulk')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-resp',
subs, response, 200)
def test_floating_ips_bulk_list_by_host(self):
response = self._do_get('os-floating-ips-bulk/testHost')
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-list-by-host-resp',
subs, response, 200)
def test_floating_ips_bulk_create(self):
response = self._do_post('os-floating-ips-bulk',
'floating-ips-bulk-create-req',
{"ip_range": "192.168.1.0/24",
"pool": CONF.default_floating_pool,
"interface": CONF.public_interface})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-create-resp', subs,
response, 200)
def test_floating_ips_bulk_delete(self):
response = self._do_put('os-floating-ips-bulk/delete',
'floating-ips-bulk-delete-req',
{"ip_range": "192.168.1.0/24"})
subs = self._get_regexes()
self._verify_response('floating-ips-bulk-delete-resp', subs,
response, 200)