repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
dan-passaro/django-recommend | simplerec/quotes/tests.py | Python | mit | 2,071 | 0 | """Tests for the Quotes app."""
import pytest
from django.core import exceptions
from . import models
@pytest.mark.django_db
def test_quote_similarity_pk_order():
"""Pairs of quotes must be ordered by PK."""
# Small/big in terms of their IDs.
    small_quote = models.Quote.objects.create(content='foo', pk=500)
    big_quote = models.Quote.objects.create(content='bar', pk=505)
# Wrong order raises an exception.
with pytest.raises(exceptions.ValidationError):
models.QuoteSimilarity.objects.create(
quote_1=big_quote, quote_2=small_quote, score=0.3)
# Good order works fine.
models.QuoteSimilarity.objects.create(
        quote_1=small_quote, quote_2=big_quote, score=0.3)
@pytest.mark.django_db
class TestQuoteSimilarityStore(object):
"""QuoteSimilarity.store sorts out which quote gets which number."""
@pytest.fixture(autouse=True)
def make_quotes(self):
"""Create small/big quote."""
# pylint: disable=attribute-defined-outside-init
self.small_quote = models.Quote.objects.create(content='foo', pk=500)
self.big_quote = models.Quote.objects.create(content='bar', pk=505)
def test_from_scratch(self):
"""Will create a new QuoteSimilarity instance properly."""
inst, created = models.QuoteSimilarity.store(
self.big_quote, self.small_quote, 0.3)
assert created
assert inst.quote_1 == self.small_quote
assert inst.quote_2 == self.big_quote
assert inst.score == 0.3
def test_updates_existing(self):
"""If an existing score is found, updates that instance."""
sim_score = models.QuoteSimilarity.objects.create(
quote_1=self.small_quote, quote_2=self.big_quote, score=0.4)
inst, created = models.QuoteSimilarity.store(
self.big_quote, self.small_quote, 0.9)
assert not created
assert inst.pk == sim_score.pk
assert inst.quote_1 == self.small_quote
assert inst.quote_2 == self.big_quote
assert inst.score == 0.9
antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_5/ar_/test_artificial_32_RelativeDifference_LinearTrend_5__0.py | Python | bsd-3-clause | 273 | 0.084249 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 0);
addiks/gmattermost | src/Application.py | Python | gpl-3.0 | 3,930 | 0.003562 | # Copyright (C) 2017 Gerrit Addiks <gerrit@addiks.net>
# https://github.com/addiks/gedit-phpide
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
gi.require_version('Notify', '0.7')
from gi.repository import Gio, Gtk, Notify
from os.path import dirname
import os
from .Controller.TeamsListController import TeamsListController
from .Controller.IndicatorController import IndicatorController
from .Model.ProfileModel import ProfileModel
from .Model.CacheModel import CacheModel
from Mattermost.ServerModel import ServerModel
class Application(Gtk.Application):
__profileModel = None # ProfileModel
__cacheModel = None # CacheModel
__indicatorController = None # IndicatorController
__teamsListController = None # TeamsListController
__assetPath = None
__servers = {}
def __init__(self):
Gtk.Application.__init__(
self,
application_id="de.addiks.gmattermost",
flags=Gio.ApplicationFlags.FLAGS_NONE
)
self.connect("activate", self.onActivate)
Notify.init("gmattermost")
assetPath = dirname(dirname(__file__)) + "/assets/"
profilePath = os.path.expanduser("~/.local/share/gmattermost/profile.xml")
profileModel = ProfileModel(profilePath)
cacheDir = os.path.expanduser("~/.local/share/gmattermost/cache/")
cacheModel = CacheModel(cacheDir)
self.__cacheModel = cacheModel
self.__profileModel = profileModel
self.__assetPath = assetPath
self.__indicatorController = IndicatorController(self)
self.hold()
def shutdown(self):
self.release()
def onActivate(self, app):
profile = self.__profileModel
        if profile.getShowOnStartup():
self.showTeamsListWindow()
def showTeamsListWindow(self, force=False, doStartup=True):
if self.__teamsListController is None:
self.__teamsListController = TeamsListController(self)
self.__teamsListController.show(force, doStartup)
def resetTeamsListWindow(self):
self.__teamsListController = None
def getAssetPath(self):
return self.__assetPath
def getProfileModel(self):
return self.__profileModel
def getServerModel(self, url):
if url not in self.__servers:
self.__servers[url] = ServerModel(url)
return self.__servers[url]
def getCache(self, cacheKey):
cacheModel = self.__cacheModel
return cacheModel.get(cacheKey)
def putCache(self, cacheKey, content):
cacheModel = self.__cacheModel
cacheModel.put(cacheKey, content)
def getCacheFilePath(self, cacheKey):
cacheModel = self.__cacheModel
return cacheModel.getCacheFilePath(cacheKey)
def createGladeBuilder(self, name):
assetPath = self.__assetPath
gladeFilePath = assetPath + "glade/" + name + ".glade"
gladeBuilder = Gtk.Builder()
gladeBuilder.add_from_file(gladeFilePath)
return gladeBuilder
def createStyleProvider(self, name):
assetPath = self.__assetPath
cssFilePath = assetPath + "styles/" + name + ".css"
cssFile = Gio.File.new_for_path(cssFilePath)
cssProvider = Gtk.CssProvider()
cssProvider.load_from_file(cssFile)
return cssProvider
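    # Illustrative usage (the widget name below is assumed, not taken from the
    # original sources):
    #   builder = app.createGladeBuilder('teams_list')
    #   window = builder.get_object('teams_list_window')
    # would parse assets/glade/teams_list.glade and fetch a widget from it.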
a-shar/web_tech | ask/qa/forms.py | Python | bsd-3-clause | 1,986 | 0.002672 | # coding=utf-8
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from qa.models import Question, Answer
class AskForm(forms.Form):
    title = forms.CharField(max_length=1024, label="Question title")
    text = forms.CharField(widget=forms.Textarea, label="Question text")
def clean_title(self):
title = self.cleaned_data['title']
if not title:
            raise forms.ValidationError(u'Title cannot be empty', code=12)
return title
def clean_text(self):
text = self.cleaned_data['text']
if not text:
            raise forms.ValidationError(u'Question cannot be empty', code=13)
return text
def save(self):
question = Question(**self.cleaned_data)
question.author = self.author
question.save()
return question
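    # NOTE: save() expects the caller (typically the view) to assign
    # form.author before calling it; 'author' is not a declared form field,
    # so it is not part of cleaned_data.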
class AnswerForm(forms.ModelForm):
class Meta:
model = Answer
fields = ('text', 'question')
labels = {
            'text': 'Answer',
}
widgets = {
"question" : forms.HiddenInput(),
}
#     text = forms.CharField(widget=forms.Textarea, label="Answer text")
# question = forms.IntegerField(widget=forms.HiddenInput(), label=None)
#
#     def clean_question(self):
# return self.cleaned_data["question"]
#
# def clean_text(self):
# text = self.cleaned_data['text']
# if not text:
#             raise forms.ValidationError(u'Answer cannot be empty', code=13)
# return text
#
# def save(self):
# answer = Answer(**self.cleaned_data)
# answer.author = User.objects.get(pk=1)
# answer.save()
# return answer
class SignupForm(UserCreationForm):
class Meta:
model = User
fields = ("username","email",)
alvaroribas/modeling_TDs | data_converter.py | Python | mit | 7,982 | 0.017665 | ######## Script to convert IRS spectra into pseudophotometric
######## datapoints for modeling the TDs
import asciitable
import numpy as np
import matplotlib.pyplot as plt
import pyfits
from scipy import interpolate
def remove_duplicates_func(seq):
""" This function takes a list and returns
the same without duplicate elements."""
seen = set()
seen_add = seen.add
return [ x for x in seq if x not in seen and not seen_add(x)]
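# Example: remove_duplicates_func([3, 1, 3, 2]) returns [3, 1, 2] --
# first-seen order is preserved, unlike list(set(seq)).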
#####
avs_dictionary = {}
avs_dictionary['CS_Cha'] = 0.25
avs_dictionary['SZ_Cha'] = 1.90
avs_dictionary['T25'] = 0.78
avs_dictionary['T35'] = 3.5
avs_dictionary['T56'] = 0.23
avs_dictionary['ISO_52'] = 1.3
obj = 'T25'
av = avs_dictionary[obj]
# path informations
path_main = '../objects/'
path_object = path_main + obj + '/' + obj +'_data/'
# Read the information of the filters
filters_info=asciitable.read(path_main+'filters_info.txt')
total_filters_names=filters_info['Filter']
total_filters_lmb=filters_info['Lmb'] #Angstroms
total_filters_av_almb=filters_info['Av/Alambda']
total_filters_zp=filters_info['ZP']
# read the phot info and get it in right units
phot_file = asciitable.read(path_object + obj +'_phot.txt')
filters = phot_file['Filter']
lmb = phot_file['Lmb']
fluxes = phot_file['Value']
errors = phot_file['Error']
units = phot_file['Units']
detections = phot_file['Detection']
zp = phot_file['ZP']
filters_av_almb = list()
filters_zp = list()
for element in phot_file:
index = np.where(total_filters_names == element['Filter'])[0][0]
filters_av_almb.append(total_filters_av_almb[index])
filters_zp.append(total_filters_zp[index])
filters_av_almb = np.array(filters_av_almb)
filters_zp = np.array(filters_zp)
# convert to jy
indexes_conversion = np.where(phot_file['Units'] == 'mag')[0]
error_fractions = errors[indexes_conversion]
fluxes[indexes_conversion] = zp[indexes_conversion] * 10 ** (-0.4 * fluxes[indexes_conversion])
errors[indexes_conversion] = error_fractions * fluxes[indexes_conversion]
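# (flux[Jy] = ZP[Jy] * 10**(-0.4 * mag); the magnitude error is applied as a
# fractional flux error, sigma_F ~= sigma_m * F, a small-error approximation)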
# deredden
fluxes = fluxes / 10**(-0.4 * av * filters_av_almb)
errors = errors / 10**(-0.4 * av * filters_av_almb)
# convert to flmb, fluxes in Jy, wavelength in microns
fluxes = (3e-8 * fluxes*1e3 / (lmb*1e4)**2)
errors = (3e-8 * errors*1e3 / (lmb*1e4)**2)
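# (the 3e-8 * 1e3 factor is c * 1e-23 collapsed into one constant:
# F_lambda[erg/cm2/s/A] = (c[A/s] / lambda[A]**2) * 1e-23 * F_nu[Jy], c = 3e18 A/s)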
# Convert now to lmbflmb, fluxes in erg/cm2/s/A, wavelength in microns
lmb_flmb = fluxes * lmb * 1e4 #set lmb to A
lmb_flmb_e = errors * lmb * 1e4 #set lmb to A
indexes_upper = np.where(detections == 0)[0]
lmb_flmb_e[indexes_upper] = 0.
file_to_write = open(path_object + obj +'_processed.txt','w')
file_to_write.write('#Filter\tLmb[um]\tLmb_flmb[erg/cm2/s]\tLmb_flmb_err[erg/cm2/s]\tDetection\n')
for index in xrange(len(fluxes)):
file_to_write.write(filters[index]+'\t')
file_to_write.write('{:.3e}\t'.format(lmb[index]))
file_to_write.write('{:.3e}\t'.format(lmb_flmb[index]))
file_to_write.write('{:.3e}\t'.format(lmb_flmb_e[index]))
file_to_write.write('{:}\n'.format(detections[index]))
file_to_write.close()
file_lmb = open(path_object + obj + '.lambda','w')
lmb_unique = remove_duplicates_func(lmb)
lmb_unique = np.sort(lmb_unique)
for element in lmb_unique:
file_lmb.write('{:.4e}\n'.format(element))
file_lmb.close()
##########################
## IRS Spectrum
# Dereddening data
# Mathis 1990 extinction law for Spitzer (Rv=5)
mathis_lmb=[2.2,3.4,5.,7.,9.,9.7,10.,12.,15.,18.,20.,25.,35.]
mathis_alambda_aj=[0.382,0.182,0.095,0.07,0.157,0.2,0.192,0.098,0.053,0.083,0.075,0.0048,0.013]
mathis_interpol=interpolate.interp1d(mathis_lmb,mathis_alambda_aj,kind='linear')
#McClure2009 extinction law (lmb in microns)
mcclure=pyfits.open(path_main + 'McClure2009.fits')
mcclure_lmb=mcclure[1].data['lambda']
mcclure_alambda_ak1=mcclure[1].data['Al/AK1']
mcclure_alambda_ak2=mcclure[1].data['Al/AK2']
indexes=[(mcclure_lmb < 36) & (mcclure_lmb > 4)]
mcclure_lmb=mcclure_lmb[indexes]
mcclure_alambda_ak1=mcclure_alambda_ak1[indexes]
mcclure_alambda_ak2=mcclure_alambda_ak2[indexes]
mcclure_interpol1=interpolate.interp1d(mcclure_lmb,mcclure_alambda_ak1,kind='linear')
mcclure_interpol2=interpolate.interp1d(mcclure_lmb,mcclure_alambda_ak2,kind='linear')
irs=pyfits.open(path_object + obj +'_IRS.fits')
spectrum=irs[0].data
irs_lmb=spectrum[:,0] ## In microns
irs_fnu=spectrum[:,1]
irs_fnu_err = np.sqrt(spectrum[:,2]**2 + spectrum[:,3]**2 + spectrum[:,4]**2)
# get the errors in relative error
irs_fnu_rel_err = irs_fnu_err / irs_fnu
# cut the order1 between 7-14 and 20.5 - 35 microns
order1=[(spectrum[:,8] == 1) & (((irs_lmb > 7.6) &(irs_lmb < 14.)) | ((irs_lmb > 20.5) & (irs_lmb < 35.)))]
# cut the order2 up to 20.5 microns
order2=[(spectrum[:,8] == 2) & (irs_lmb < 20.5)]
# get to corresponding values and sort them
lmb1=irs_lmb[order1]
lmb2=irs_lmb[order2]
irs_fnu1=irs_fnu[order1]
irs_fnu2=irs_fnu[order2]
irs_lmb=np.concatenate((lmb1,lmb2),axis=0)
irs_fnu=np.concatenate((irs_fnu1,irs_fnu2),axis=0)
irs_fnu=irs_fnu[np.argsort(irs_lmb)]
irs_lmb=irs_lmb[np.argsort(irs_lmb)]
#
aj=av*0.31
ak=av*0.13
print 'Aj:'+str(aj)
print 'Ak:'+str(ak)
# deredden: Mathis law for low extinction (aj < 0.8), McClure laws otherwise
if (aj < 0.8):
    coeffs=mathis_interpol(irs_lmb)
    almbs=coeffs*aj
    irs_fnu_der=irs_fnu*10.**(-0.4*(-almbs))
elif (ak < 1):
    coeffs=mcclure_interpol1(irs_lmb)
    almbs=coeffs*ak
    irs_fnu_der=irs_fnu*10.**(-0.4*(-almbs))
else:
    coeffs=mcclure_interpol2(irs_lmb)
    almbs=coeffs*ak
    irs_fnu_der=irs_fnu*10.**(-0.4*(-almbs))
print 'Min:'+str(almbs.min())+'/Max:'+str(almbs.max())
# convert from Jy to erg/cm2/s/Hz
irs_fnu_der=irs_fnu_der*1e-23
# convert to erg/cm2/s/A
irs_flmb=irs_fnu_der*3.e8*1.e2/irs_lmb**2
# sort everything and get lmb_flmb
indexes_real = np.isfinite(irs_flmb)
irs_lmb = irs_lmb[indexes_real]
irs_flmb = irs_flmb[indexes_real]
irs_fnu_rel_err = irs_fnu_rel_err[indexes_real]
irs_lmbflmb = irs_lmb *1e4 * irs_flmb # irs_lmb in microns
# bring back the errors, only when the errors are real too
indexes_real = np.isfinite(irs_fnu_rel_err)
irs_lmb = irs_lmb[indexes_real]
irs_lmbflmb = irs_lmbflmb[indexes_real]
irs_fnu_rel_err = irs_fnu_rel_err[indexes_real]
irs_lmbflmb_err = irs_lmbflmb * irs_fnu_rel_err
# now bin everything
n_bins = 10
len_bins = np.int(np.floor(len(irs_lmb) / n_bins))
lmb_binned = list()
lmbflmb_binned = list()
lmbflmb_err_binned = list()
for n_bin in xrange(n_bins):
# compute indexes for binning
indexes_bin = np.arange(n_bin * len_bins, (n_bin+1) * len_bins)
# in the last case, take the remaining datapoints in the last bin
if n_bin == n_bins-1:
indexes_bin = np.arange(n_bin * len_bins,len(irs_lmb))
lmb_value = np.mean(irs_lmb[indexes_bin])
lmbflmb_value = np.mean(irs_lmbflmb[indexes_bin])
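    # the bin uncertainty is the standard error of the mean, std / sqrt(n)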
lmbflmb_err_value = np.std(irs_lmbflmb[indexes_bin]) / np.sqrt(len(indexes_bin))
# append the results
lmb_binned.append(lmb_value)
lmbflmb_binned.append(lmbflmb_value)
lmbflmb_err_binned.append(lmbflmb_err_value)
# finally, convert it to np arrays
lmb_binned = np.array(lmb_binned)
lmbflmb_binned = np.array(lmbflmb_binned)
lmbflmb_err_binned = np.array(lmbflmb_err_binned)
# append the results to the files
file_to_write = open(path_object + obj +'_processed.txt','a')
file_lmb = open(path_object + obj + '.lambda','a')
for index in xrange(len(lmb_binned)):
file_lmb.write('{:.4e}\n'.format(lmb_binned[index]))
file_to_write.write('IRS_binned\t')
file_to_write.write('{:.3e}\t'.format(lmb_binned[index]))
file_to_write.write('{:.3e}\t'.format(lmbflmb_binned[index]))
file_to_write.write('{:.3e}\t'.format(lmbflmb_err_binned[index]))
file_to_write.write('1\n')
file_to_write.close()
file_lmb.close()
# OPTIONAL: plot to check
plot_to_check = True
if plot_to_check:
plt.errorbar(irs_lmb,irs_lmbflmb,yerr=irs_lmbflmb_err,fmt='o',mec=None, ms=1, mfc='blue')
plt.errorbar(lmb_binned,lmbflmb_binned,yerr=lmbflmb_err_binned,fmt='o',mfc='red',mec=None,ms=8,color='red')
plt.xscale('log')
plt.yscale('log')
plt.xlim(4,40)
plt.show()
mitou/meikan | updater.py | Python | mit | 3,661 | 0.001173 | # -*- coding: utf-8 -*-
"""
A script that takes a backup of the data on kintone and then bulk-updates it.
No options -> dry run using the local cache.
-r (--real) -> fetch the latest data, back it up, then update.
-f (--from-backup) -> recovery when -r goes wrong: point at a backup and update from that data.
"""
from cache import get_all, get_app
import time
import argparse
from render import pretty
def concat_lines(x, y):
if isinstance(y, str): y = y.decode('utf-8')
return x.rstrip('\n') + '\n' + y
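# e.g. concat_lines(u'a\n\n', 'b') -> u'a\nb' (trailing newlines collapse to one)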
def add_a_tag(tags, a_tag, length=None):
assert not '\n' in a_tag
if length:
assert len(a_tag[1:].split(a_tag[0])) == length
tags.append(a_tag)
def rows_to_csv(rows):
import cStringIO
import unicodecsv as csv
f = cStringIO.StringIO()
csv.writer(f, encoding='utf-8').writerows(rows)
return f.getvalue()
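# e.g. rows_to_csv([[u'a', u'b'], [u'c', u'd']]) -> 'a,b\r\nc,d\r\n'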
def convert(xs, args):
"add new creators from 2015_creators_170113.csv"
import unicodecsv as csv
name2x = dict((x.name, x) for x in xs)
to_update = []
to_add = []
rd = csv.reader(file('2015_creators_170113.csv'), encoding='utf-8')
for row in rd:
year = row[2]
kubun = row[3]
sc = row[4]
theme = row[5]
name = row[6]
pm = row[9]
affil1 = row[7]
affil2 = row[8]
if name in name2x:
x = name2x[name]
to_update.append(x)
else:
from mymodel import Person
x = Person()
x.name = name
to_add.append(x)
tags = [
["未踏採択", year, kubun, sc, theme, pm],
["所属", affil1, "{}年時点".format(year), affil2]]
tags = rows_to_csv(tags)
x.tags = concat_lines(x.tags, tags)
print name
print tags
return to_add, to_update
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--real', '-r',
action='store_true', help='read from kintone and write to kintone')
parser.add_argument(
'--from-backup', '-f',
action='store', help='read from backup and write to kintone')
parser.add_argument(
'--converter', '-c',
action='store', help='use specific converter')
parser.add_argument(
'--infile', '-i',
action='store', help='input file')
args = parser.parse_args()
if args.real:
dumpdir = time.strftime('backup_%m%d_%H%M')
        xs = get_all(cache=False, name=dumpdir)
elif args.from_backup:
xs = get_all(cache=True, name=args.from_backup)
else:
xs = get_all(cache=True)
if not args.converter:
to_add, to_update = convert(xs, args)
else:
import imp
info = imp.find_module('converter/' + args.converter)
        m = imp.load_module('m', *info)
to_add, to_update = m.convert(xs, args)
print "{} items to update, {} items to add".format(len(to_update), len(to_add))
    # when recovering from a backup we need to ignore the revision
if args.from_backup:
for x in xs:
x.revision = -1 # ignore revision
if args.real or args.from_backup:
app = get_app()
result = app.batch_create(to_add)
assert result.ok
for i in range(0, len(to_update), 100):
print i, to_update[i].name
result = app.batch_update(to_update[i:i + 100])
assert result.ok
else:
# for debug: Run this script with `ipython -i`
globals()['xs'] = xs
if __name__ == '__main__':
main()
shengshuyang/StanfordCNNClass | shadow_project/extract_patches.py | Python | gpl-3.0 | 1,314 | 0.010654 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import os
from math import sqrt
from os.path import expanduser
def extract_patches(path, filename, out_path, patch_size, stride, visualize):
img = mpimg.imread(path+filename)
nRows, nCols, nColor = img.shape
psx, psy = patch_size
patches = []
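    # slide a psx-by-psy window across the image interior (the margins keep
    # every patch fully inside the frame), stepping `stride` pixels each move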
for r in xrange(psy/2+1, nRows - psy/2 - 1, stride):
for c in xrange(psx/2+1, nCols - psx/2 - 1, stride):
patches.append(img[r-psy/2 : r + psy/2, c-psx/2 : c+psx/2, :])
grid_size = int(sqrt(len(patches)))
name, ext = os.path.splitext(filename)
for pos in xrange(len(patches)):
plt.imsave(out_path + name + "_" + str(pos) + ext, patches[pos])
if not visualize:
return
for pos in xrange(len(patches)):
if pos + 1 < grid_size ** 2:
plt.subplot(grid_size, grid_size, pos+1)
plt.imshow(patches[pos])
plt.axis('off')
if __name__ == "__main__":
home = expanduser("~")
nyu_path = home+'/IGNORE_NYU/jpgs/'
#extract_patches(, [16,16], 100, True)
for root, dirs, files in os.walk(nyu_path, topdown=False):
for filename in files:
extract_patches(nyu_path, filename, nyu_path+"/patches/", [64,64], 100, False)
rkashapov/buildbot | master/buildbot/changes/github.py | Python | gpl-2.0 | 10,700 | 0.000093 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
from datetime import datetime
from fnmatch import fnmatch
from twisted.internet import defer
from buildbot import config
from buildbot.changes import base
from buildbot.util import ascii2unicode
from buildbot.util import datetime2epoch
from buildbot.util import httpclientservice
from buildbot.util.logger import Logger
from buildbot.util.state import StateMixin
log = Logger()
HOSTED_BASE_URL = "https://api.github.com"
link_urls = {
"https": "clone_url",
"svn": "svn_url",
"git": "git_url",
"ssh": "ssh_url"
}
class PullRequestMixin(object):
def extractProperties(self, payload):
def flatten(properties, base, info_dict):
for k, v in iteritems(info_dict):
name = ".".join([base, k])
if isinstance(v, dict):
flatten(properties, name, v)
elif any([fnmatch(name, expr)
for expr in self.github_property_whitelist]):
properties[name] = v
properties = {}
flatten(properties, "github", payload)
return properties
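    # e.g. with 'github.head.*' whitelisted, a payload of
    # {'head': {'ref': 'topic'}} yields {'github.head.ref': 'topic'}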
class GitHubPullrequestPoller(base.ReconfigurablePollingChangeSource,
StateMixin, PullRequestMixin):
compare_attrs = ("owner", "repo", "token", "branches", "pollInterval",
"category", "pollAtLaunch", "name")
db_class_name = 'GitHubPullrequestPoller'
def __init__(self, owner, repo, **kwargs):
name = kwargs.get("name")
if not name:
kwargs["name"] = "GitHubPullrequestPoller:" + owner + "/" + repo
super(GitHubPullrequestPoller, self).__init__(owner, repo, **kwargs)
def checkConfig(self,
owner,
repo,
branches=None,
category='pull',
baseURL=None,
pullrequest_filter=True,
token=None,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs):
if repository_type not in ["https", "svn", "git", "ssh"]:
config.error(
"repository_type must be one of {https, svn, git, ssh}")
base.ReconfigurablePollingChangeSource.checkConfig(
self, name=self.name, **kwargs)
@defer.inlineCallbacks
def reconfigService(self,
owner,
repo,
branches=None,
pollInterval=10 * 60,
category=None,
baseURL=None,
pullrequest_filter=True,
token=None,
pollAtLaunch=False,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs):
yield base.ReconfigurablePollingChangeSource.reconfigService(
self, name=self.name, **kwargs)
if baseURL is None:
baseURL = HOSTED_BASE_URL
if baseURL.endswith('/'):
baseURL = baseURL[:-1]
http_headers = {'User-Agent': 'Buildbot'}
if token is not None:
http_headers.update({'Authorization': 'token ' + token})
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, baseURL, headers=http_headers)
self.token = token
self.owner = owner
self.repo = repo
self.branches = branches
self.github_property_whitelist = github_property_whitelist
self.pollInterval = pollInterval
self.pollAtLaunch = pollAtLaunch
self.repository_type = link_urls[repository_type]
self.magic_link = magic_link
if github_property_whitelist is None:
self.github_property_whitelist = []
if callable(pullrequest_filter):
self.pullrequest_filter = pullrequest_filter
else:
self.pullrequest_filter = (lambda _: pullrequest_filter)
self.category = category if callable(category) else ascii2unicode(
category)
def describe(self):
return "GitHubPullrequestPoller watching the "\
"GitHub repository %s/%s" % (
self.owner, self.repo)
@defer.inlineCallbacks
def _getPullInformation(self, pull_number):
result = yield self._http.get('/'.join(
['/repos', self.owner, self.repo, 'pulls', str(pull_number)]))
my_json = yield result.json()
defer.returnValue(my_json)
@defer.inlineCallbacks
def _getPulls(self):
log.debug("GitHubPullrequestPoller: polling "
"GitHub repository %s/%s, branches: %s" %
(self.owner, self.repo, self.branches))
result = yield self._http.get('/'.join(
['/repos', self.owner, self.repo, 'pulls']))
my_json = yield result.json()
defer.returnValue(my_json)
@defer.inlineCallbacks
def _getEmail(self, user):
result = yield self._http.get("/".join(['/users', user]))
my_json = yield result.json()
defer.returnValue(my_json["email"])
@defer.inlineCallbacks
def _getFiles(self, prnumber):
result = yield self._http.get("/".join([
'/repos', self.owner, self.repo, 'pulls', str(prnumber), 'files'
]))
my_json = yield result.json()
defer.returnValue([f["filename"] for f in my_json])
@defer.inlineCallbacks
def _getCurrentRev(self, prnumber):
# Get currently assigned revision of PR number
result = yield self._getStateObjectId()
rev = yield self.master.db.state.getState(result, 'pull_request%d' %
prnumber, None)
defer.returnValue(rev)
@defer.inlineCallbacks
def _setCurrentRev(self, prnumber, rev):
# Set the updated revision for PR number.
result = yield self._getStateObjectId()
yield self.master.db.state.setState(result,
'pull_request%d' % prnumber, rev)
@defer.inlineCallbacks
def _getStateObjectId(self):
# Return a deferred for object id in state db.
result = yield self.master.db.state.getObjectId(
'%s/%s' % (self.owner, self.repo), self.db_class_name)
defer.returnValue(result)
@defer.inlineCallbacks
def _processChanges(self, github_result):
for pr in github_result:
# Track PRs for specified branches
            base_branch = pr['base']['ref']
prnumber = pr['number']
revision = pr['head']['sha']
# Check to see if the branch is set or matches
            if self.branches is not None and base_branch not in self.branches:
continue
if (self.pullrequest_filter is not None and
not self.pullrequest_filter(pr)):
continue
current = yield self._getCurrentRev(prnumber)
if not current or current[0:12] != revision[0:12]:
# Access title, repo, html link, and comments
pr = yield self._getPullInformation(prnumber)
title = pr['title']
if self.m |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-512-transformer/transformer/data_generators/translate_test.py | Python | apache-2.0 | 2,128 | 0.007049 | """Translate generators test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tarfile
import tensorflow as tf
from data_generators import text_problems
from data_generators import translate
class TranslateTest(tf.test.TestCase):
DATASETS = [
["data1.tgz", ("train1.en", "train1.de")],
["data2.tgz", ("train2.en", "train2.de")],
["data3.tgz", ("train3.en", "train3.de")],
]
@classmethod
def setUpClass(cls):
tmp_dir = tf.test.get_temp_dir()
compressed_dir = os.path.join(tmp_dir, "compressed")
shutil.rmtree(tmp_dir)
tf.gfile.MakeDirs(compressed_dir)
en_data = [str(i) for i in range(10, 40)]
de_data = [str(i) for i in range(100, 130)]
data = list(zip(en_data, de_data))
for i, dataset in enumerate(cls.DATASETS):
tar_file = dataset[0]
en_file, de_file = [
          os.path.join(compressed_dir, name) for name in dataset[1]
]
with tf.gfile.Open(en_file, "w") as en_f:
with tf.gfile.Open(de_file, "w") as de_f:
start = i * 10
end = start + 10
for en_line, de_line in data[start:end]:
            en_f.write(en_line)
en_f.write("\n")
de_f.write(de_line)
de_f.write("\n")
with tarfile.open(os.path.join(tmp_dir, tar_file), "w:gz") as tar_f:
tar_f.add(en_file, os.path.basename(en_file))
tar_f.add(de_file, os.path.basename(de_file))
cls.tmp_dir = tmp_dir
cls.data = data
def testCompileData(self):
filename = "out"
filepath = os.path.join(self.tmp_dir, filename)
translate.compile_data(self.tmp_dir, self.DATASETS, filename)
count = 0
for i, example in enumerate(
text_problems.text2text_txt_iterator(filepath + ".lang1",
filepath + ".lang2")):
expected = self.data[i]
self.assertEqual(list(expected), [example["inputs"], example["targets"]])
count += 1
self.assertEqual(count, len(self.data))
if __name__ == "__main__":
tf.test.main()
boniatillo-com/PhaserEditor | docs/v2/conf.py | Python | epl-1.0 | 4,869 | 0.001643 | # -*- coding: utf-8 -*-
#
# Phaser Editor documentation build configuration file, created by
# sphinx-quickstart on Thu May 25 08:35:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
#'rinoh.frontend.sphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Phaser Editor 2D'
copyright = u'2016-2020, Arian Fornaris'
author = u'Arian Fornaris'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.1.7'
# The full version, including alpha/beta/rc tags.
release = u'2.1.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#import sphinx_rtd_theme
html_theme = "phaser-editor"
# Uncomment for generate Eclipse Offline Help
#html_theme = "eclipse-help"
html_theme_path = ["_themes"]
html_show_sourcelink = False
html_show_sphinx = False
html_favicon = "logo.png"
html_title = "Phaser Editor Help"
html_show_copyright = True
print(html_theme_path)
#html_theme = 'classic'
highlight_language = 'javascript'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhaserEditordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': '',
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PhaserEditor2D.tex', u'Phaser Editor 2D Documentation',
u'Arian Fornaris', 'manual'),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PhaserEditor2D', u'Phaser Editor 2D Documentation',
author, 'Arian', 'A friendly HTML5 game IDE.',
'Miscellaneous'),
]
aaronsw/watchdog | vendor/rdflib-2.4.0/rdflib/syntax/serializers/__init__.py | Python | agpl-3.0 | 449 | 0.004454 | from rdflib import URIRef
class Serializer(object):
def __init__(self, store):
self.store = store
self.encoding = "UTF-8"
self.base = None
def serialize(self, stream, base=None, encoding=None, **args):
"""Abstract method"""
    def relativize(self, uri):
        base = self.base
if base is not None and uri.startswith(base):
uri = URIRef(uri.replace(base, "", 1))
return uri
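    # e.g. with base 'http://example.org/', URIRef('http://example.org/a#b')
    # relativizes to URIRef('a#b'); URIs outside the base pass through unchanged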
dhamaniasad/gpicsync | geonames.py | Python | gpl-2.0 | 6,231 | 0.027283 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
#
# A tool to search for geonames metadata either:
# - the path of a geocoded picture
# - by giving a latitude and longitude values (decimal degrees format)
#
# (c) francois.schnell francois.schnell@gmail.com
# http://francois.schnell.free.fr
#
# This script is released under the GPL license v2
#
# More informations and help can be found here: http://code.google.com/p/gpicsync/
#
###############################################################################
from geoexif import *
from urllib2 import urlopen
import xml.etree.ElementTree as ET, re, decimal
from math import *
import unicodedata #test
import codecs#test
class Geonames(object):
def __init__(self,picName="",lat="",long=""):
"""
Either give the path to a geocoded picture or
give latitute/longitude strings
"""
self.lat=lat
self.long=long
self.picName=picName
if self.lat == "" and self.long == "":
mypicture=GeoExif(picName)
self.lat=mypicture.readLatitude()
self.long=mypicture.readLongitude()
print self.lat, self.long
print "latitude= ",self.lat," longitude= ",self.long
url= "http://ws.geonames.org/findNearbyPlaceName?lat="+str(self.lat)+"&lng="+str(self.long)+"&style=full"
print "url= ",url
self.page = codecs.getreader("utf-8")(urlopen(url)).read()
#print self.page
#print self.page.encode("utf8")
def searchTag(self,tag,page):
"""
Returns the content of a <tag> in the given page (string)
"""
content=re.search('(<'+tag+'>.*</'+tag+'>)',page).group()
content=content.split("<"+tag+">")[1].split("</"+tag+">")[0]
#return content
return unicode(content)
def findNearbyPlace(self):
""" find nearby place at geonames.org"""
self.nearbyPlace=self.searchTag("name",self.page)
#print self.nearbyPlace
#print self.nearbyPlace + " => " + unicodedata.normalize('NFKD', self.nearbyPlace).encode('ascii','ignore')
return self.nearbyPlace
def findNearbyPlaceLatLon(self):
""" Returns lat/long of the nearby place """
self.nearbyPlaceLat=self.searchTag("lat",self.page)
        self.nearbyPlaceLon=self.searchTag("lng",self.page)
        return (self.nearbyPlaceLat,self.nearbyPlaceLon)
def findOrientation(self):
debug=False
nearbyPlaceLat=float(self.findNearbyPlaceLatLon()[0])
        nearbyPlacelon=float(self.findNearbyPlaceLatLon()[1])
        deltaLat=float(self.lat)-nearbyPlaceLat
deltaLon=float(self.long)-nearbyPlacelon
situation=""
if debug==True:
print "nearbyPlaceLat, nearbyPlacelon", nearbyPlaceLat,nearbyPlacelon
print "GPS lat,lon",self.lat, self.long
print "deltaLat, deltaLon", deltaLat, deltaLon
print "(tan(pi/8)*deltaLon)", (tan(pi/8)*deltaLon)
print "(tan(3*pi/8)*deltaLon)", (tan(3*pi/8)*deltaLon)
print "angle in degrees",atan(deltaLon/deltaLat)*(360/(2*pi))
if (deltaLon >0) and (deltaLat >0):
if debug==True: print "In (deltaLon >0) and (deltaLat >0)"
if deltaLat <= tan(pi/8)*deltaLon: situation="East"
if (tan(pi/8)*deltaLon)<deltaLat<(tan(3*pi/8)*deltaLon) : situation="North-East"
if (tan(3*pi/8)*deltaLon)<= deltaLat<=(pi/2) : situation="North"
if (deltaLon >0) and (deltaLat <0):
if debug==True: print "In (deltaLon >0) and (deltaLat <0)"
if abs(deltaLat) <= tan(pi/8)*deltaLon: situation="East"
if (tan(pi/8)*deltaLon)<abs(deltaLat)<(tan(3*pi/8)*deltaLon) : situation="South-East"
if (tan(3*pi/8)*deltaLon)<= abs(deltaLat) <=(pi/2) : situation="South"
if (deltaLon <0) and (deltaLat >0):
if debug==True: print "In (deltaLon <0) and (deltaLat >0)"
if abs(deltaLat) <= abs(tan(pi/8)*deltaLon): situation="West"
if abs(tan(pi/8)*deltaLon)<abs(deltaLat)<abs(tan(3*pi/8)*abs(deltaLon)) : situation="North-West"
if abs(tan(3*pi/8)*deltaLon)<= abs(deltaLat)<=(pi/2) : situation="North"
if (deltaLon <0) and (deltaLat <0):
if debug==True: print "In (deltaLon <0) and (deltaLat <0)"
if abs(deltaLat) <= abs(tan(pi/8)*deltaLon): situation="West"
if abs(tan(pi/8)*deltaLon)<abs(deltaLat)<abs(tan(3*pi/8)*deltaLon) : situation="South-West"
if abs(tan(3*pi/8)*deltaLon)<= abs(deltaLat) <=(pi/2) : situation="South"
print situation
return situation
def findDistance(self):
"""find distance in km to nearby place"""
self.distance=self.searchTag("distance",self.page)
self.distance=decimal.Decimal(self.distance)
self.distance=str(self.distance.quantize(decimal.Decimal('0.01')))
print self.distance
return self.distance
def findCountry(self):
""" find country at geonames.org"""
self.countryName=self.searchTag("countryName",self.page)
print self.countryName
return self.countryName
def findCountryCode(self):
""" find country code, example France= FR"""
self.countryCode=self.searchTag("countryCode",self.page)
print self.countryCode
return self.countryCode
def findRegion(self):
""" find region (adminName1) at geonames.org"""
self.regionName=self.searchTag("adminName1",self.page)
print self.regionName
return self.regionName
if __name__=="__main__":
#nearby=Geonames(picName="test.jpg")
nearby=Geonames(lat="32.684393300",long="34.962920000")
#nearby=Geonames(lat="11.2183076664",long="-85.6129039998")
#nearby=Geonames(lat="48.338236",long="11.969516") #alsace
nearby.findNearbyPlace()
nearby.findDistance()
nearby.findCountry()
nearby.findRegion()
nearby.findOrientation()
nearby.findCountryCode()
shivaenigma/pycoin | pycoin/networks/__init__.py | Python | mit | 426 | 0.004695 |
from .registry import ( # noqa
register_network, network_for_netcode, network_codes, network_prefixes,
    network_name_for_netcode, subnet_name_for_netcode, full_network_name_for_netcode,
wif_prefix_for_netcode, address_prefix_for_netcode, pay_to_script_prefix_for_netcode,
prv32_prefix_for_netcode, pub32_prefix_for_netcode, bech32_hrp_for_netcode,
    pay_to_script_wit_for_netcode, address_wit_for_netcode
)
KRHS-GameProgramming-2014/Arkansas-Smith | StartBlock.py | Python | bsd-2-clause | 390 | 0.087179 | import pygame
class StartBlock(pygame.sprite.Sprite):
def __init__(self, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("Art/EnterBlock.png")
self.rect = self.image.get_rect()
self.place(pos)
self.living = True
def place(self, pos):
        self.rect.topleft = pos
def update(*args):
self = args[0]
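# Illustrative usage (assumes the game assigns the sprite groups that
# pygame.sprite.Sprite.__init__ reads from StartBlock.containers):
#   StartBlock.containers = pygame.sprite.Group()
#   block = StartBlock(pos=[32, 64])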
pritha-srivastava/sm | drivers/lvhdutil.py | Python | lgpl-2.1 | 13,256 | 0.005432 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Helper functions for LVHD SR. This module knows about RAW and VHD VDI's
that live in LV's."""
import os
import sys
import time
import util
import vhdutil
import xs_errors
from lock import Lock
from refcounter import RefCounter
MSIZE_MB = 2 * 1024 * 1024 # max virt size for fast resize
MSIZE = long(MSIZE_MB * 1024 * 1024)
VG_LOCATION = "/dev"
VG_PREFIX = "VG_XenStorage-"
LVM_SIZE_INCREMENT = 4 * 1024 * 1024
LV_PREFIX = {
vhdutil.VDI_TYPE_VHD : "VHD-",
vhdutil.VDI_TYPE_RAW : "LV-",
}
VDI_TYPES = [ vhdutil.VDI_TYPE_VHD, vhdutil.VDI_TYPE_RAW ]
JRN_INFLATE = "inflate"
JVHD_TAG = "jvhd"
LOCK_RETRY_ATTEMPTS = 20
# ref counting for VDI's: we need a ref count for LV activation/deactivation
# on the master
NS_PREFIX_LVM = "lvm-"
class VDIInfo:
uuid = ""
scanError = False
vdiType = None
lvName = ""
sizeLV = -1
sizeVirt = -1
lvActive = False
lvOpen = False
lvReadonly = False
hidden = False
parentUuid = ""
def __init__(self, uuid):
self.uuid = uuid
def matchLV(lvName):
"""given LV name, return the VDI type and the UUID, or (None, None)
if the name doesn't match any known type"""
for vdiType in VDI_TYPES:
prefix = LV_PREFIX[vdiType]
if lvName.startswith(prefix):
return (vdiType, lvName.replace(prefix, ""))
return (None, None)
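# e.g. matchLV('VHD-' + uuid) -> (vhdutil.VDI_TYPE_VHD, uuid); names without a
# recognized prefix -> (None, None)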
def extractUuid(path):
uuid = os.path.basename(path)
if uuid.startswith(VG_PREFIX):
# we are dealing with realpath
uuid = uuid.replace("--", "-")
uuid.replace(VG_PREFIX, "")
for t in VDI_TYPES:
if uuid.find(LV_PREFIX[t]) != -1:
uuid = uuid.split(LV_PREFIX[t])[-1]
uuid = uuid.strip()
# TODO: validate UUID format
return uuid
return None
def calcSizeLV(sizeVHD):
return util.roundup(LVM_SIZE_INCREMENT, sizeVHD)
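# e.g. calcSizeLV(5 * 1024 * 1024) -> 8 MiB, rounded up to whole 4 MiB increments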
def calcSizeVHDLV(sizeVirt):
# all LVHD VDIs have the metadata area preallocated for the maximum
# possible virtual size (for fast online VDI.resize)
metaOverhead = vhdutil.calcOverheadEmpty(MSIZE)
bitmapOverhead = vhdutil.calcOverheadBitmap(sizeVirt)
return calcSizeLV(sizeVirt + metaOverhead + bitmapOverhead)
def getLVInfo(lvmCache, lvName = None):
"""Load LV info for all LVs in the VG or an individual LV.
This is a wrapper for lvutil.getLVInfo that filters out LV's that
are not LVHD VDI's and adds the vdi_type information"""
allLVs = lvmCache.getLVInfo(lvName)
lvs = dict()
for lvName, lv in allLVs.iteritems():
vdiType, uuid = matchLV(lvName)
if not vdiType:
continue
lv.vdiType = vdiType
lvs[uuid] = lv
return lvs
def getVDIInfo(lvmCache):
"""Load VDI info (both LV and if the VDI is not raw, VHD info)"""
vdis = {}
lvs = getLVInfo(lvmCache)
haveVHDs = False
for uuid, lvInfo in lvs.iteritems():
if lvInfo.vdiType == vhdutil.VDI_TYPE_VHD:
haveVHDs = True
vdiInfo = VDIInfo(uuid)
vdiInfo.vdiType = lvInfo.vdiType
vdiInfo.lvName = lvInfo.name
vdiInfo.sizeLV = lvInfo.size
vdiInfo.sizeVirt = lvInfo.size
vdiInfo.lvActive = lvInfo.active
vdiInfo.lvOpen = lvInfo.open
vdiInfo.lvReadonly = lvInfo.readonly
vdiInfo.hidden = lvInfo.hidden
vdis[uuid] = vdiInfo
if haveVHDs:
pattern = "%s*" % LV_PREFIX[vhdutil.VDI_TYPE_VHD]
vhds = vhdutil.getAllVHDs(pattern, extractUuid, lvmCache.vgName)
uuids = vdis.keys()
for uuid in uuids:
vdi = vdis[uuid]
if vdi.vdiType == vhdutil.VDI_TYPE_VHD:
if not vhds.get(uuid):
lvmCache.refresh()
if lvmCache.checkLV(vdi.lvName):
util.SMlog("*** VHD info missing: %s" % uuid)
vdis[uuid].scanError = True
else:
util.SMlog("LV disappeared since last scan: %s" % uuid)
del vdis[uuid]
elif vhds[uuid].error:
util.SMlog("*** vhd-scan error: %s" % uuid)
vdis[uuid].scanError = True
else:
vdis[uuid].sizeVirt = vhds[uuid].sizeVirt
vdis[uuid].parentUuid = vhds[uuid].parentUuid
vdis[uuid].hidden = vhds[uuid].hidden
return vdis
def inflate(journaler, srUuid, vdiUuid, size):
"""Expand a VDI LV (and its VHD) to 'size'. If the LV is already bigger
than that, it's a no-op. Does not change the virtual size of the VDI"""
lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
vgName = VG_PREFIX + srUuid
path = os.path.join(VG_LOCATION, vgName, lvName)
lvmCache = journaler.lvmCache
currSizeLV = lvmCache.getSize(lvName)
newSize = calcSizeLV(size)
if newSize <= currSizeLV:
return
journaler.create(JRN_INFLATE, vdiUuid, str(currSizeLV))
util.fistpoint.activate("LVHDRT_inflate_after_create_journal",srUuid)
lvmCache.setSize(lvName, newSize)
util.fistpoint.activate("LVHDRT_inflate_after_setSize",srUuid)
if not util.zeroOut(path, newSize - vhdutil.VHD_FOOTER_SIZE,
vhdutil.VHD_FOOTER_SIZE):
raise Exception('failed to zero out VHD footer')
util.fistpoint.activate("LVHDRT_inflate_after_zeroOut",srUuid)
vhdutil.setSizePhys(path, newSize, False)
util.fistpoint.activate("LVHDRT_inflate_after_setSizePhys",srUuid)
journaler.remove(JRN_INFLATE, vdiUuid)
def deflate(lvmCache, lvName, size):
"""Shrink the LV and the VHD on it to 'size'. Does not change the
virtual size of the VDI"""
currSizeLV = lvmCache.getSize(lvName)
newSize = calcSizeLV(size)
if newSize >= currSizeLV:
return
path = os.path.join(VG_LOCATION, lvmCache.vgName, lvName)
# no undo necessary if this fails at any point between now and the end
vhdutil.setSizePhys(path, newSize)
lvmCache.setSize(lvName, newSize)
def setSizeVirt(journaler, srUuid, vdiUuid, size, jFile):
"""When resizing the VHD virtual size, we might have to inflate the LV in
case the metadata size increases"""
lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
vgName = VG_PREFIX + srUuid
path = os.path.join(VG_LOCATION, vgName, lvName)
    inflate(journaler, srUuid, vdiUuid, calcSizeVHDLV(size))
vhdutil.setSizeVirt(path, size, jFile)
def _tryAcquire(lock):
"""We must give up if the SR is locked because it could be locked by the
coalesce thread trying to acquire the VDI lock we're holding, so as to
    avoid deadlock"""
for i in range(LOCK_RETRY_ATTEMPTS):
gotLock = lock.acquireNoblock()
if gotLock:
return
time.sleep(1)
raise util.SRBusyException()
def attachThin(journaler, srUuid, vdiUuid):
"""Ensure that the VDI LV is expanded to the fully-allocated size"""
lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
vgName = VG_PREFIX + srUuid
lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
lvmCache = journaler.lvmCache
_tryAcquire(lock)
lvmCache.refresh()
vhdInfo = vhdutil.getVHDInfoLVM(lvName, extractUuid, vgName)
newSize = calcSizeVHDLV(vhdInfo.sizeVirt)
currSizeLV = lvmCache.getSize(lvName)
if newSize <= currSizeLV:
return
lvmCache.activate(NS_PREFIX_LVM + srUuid, v |
h31nr1ch/Mirrors | c/OtherProblems/patinhos-2334.py | Python | gpl-3.0 | 123 | 0.02439 | while(True):
    n=int(input())
    if n==-1:
break
elif n==0:
print("0")
| else:
print(n-1)
parisots/population-gcn | fetch_data.py | Python | gpl-3.0 | 2,398 | 0.00417 | # Copyright (C) 2017 Sarah Parisot <s.parisot@imperial.ac.uk>, , Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nilearn import datasets
import ABIDEParser as Reader
import os
import shutil
# Selected pipeline
pipeline = 'cpac'
# Input data variables
num_subjects = 871 # Number of subjects
root_folder = '/path/to/data/'
data_folder = os.path.join(root_folder, 'ABIDE_pcp/cpac/filt_noglobal')
# Files to fetch
files = ['rois_ho']
filemapping = {'func_preproc': 'func_preproc.nii.gz',
'rois_ho': 'rois_ho.1D'}
if not os.path.exists(data_folder): os.makedirs(data_folder)
shutil.copyfile('./subject_IDs.txt', os.path.join(data_folder, 'subject_IDs.txt'))
# Download database files
abide = datasets.fetch_abide_pcp(data_dir=root_folder, n_subjects=num_subjects, pipeline=pipeline,
band_pass_filtering=True, global_signal_regression=False, derivatives=files)
subject_IDs = Reader.get_ids(num_subjects)
subject_IDs = subject_IDs.tolist()
# Create a folder for each subject
for s, fname in zip(subject_IDs, Reader.fetch_filenames(subject_IDs, files[0])):
subject_folder = os.path.join(data_folder, s)
if not os.path.exists(subject_folder):
os.mkdir(subject_folder)
# Get the base filename for each subject
base = fname.split(files[0])[0]
# Move each subject file to the subject folder
for fl in files:
if not os.path.exists(os.path.join(subject_folder, base + filemapping[fl])):
shutil.move(base + filemapping[fl], subject_folder)
time_series = Reader.get_timeseries(subject_IDs, 'ho')
# Compute and save connectivity matrices
for i in range(len(subject_IDs)):
Reader.subject_connectivity(time_series[i], subject_IDs[i], 'ho', 'correlation')
yogesh2021/qds-sdk-py | qds_sdk/commands.py | Python | apache-2.0 | 47,582 | 0.003363 | """
The commands module contains the base definition for
a generic Qubole command and the implementation of all
the specific commands
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from qds_sdk.exception import ParseError
from qds_sdk.account import Account
from qds_sdk.util import GentleOptionParser
from qds_sdk.util import OptionParsingError
from qds_sdk.util import OptionParsingExit
from optparse import SUPPRESS_HELP
import boto
import time
import logging
import sys
import re
import pipes
import os
import json
log = logging.getLogger("qds_commands")
# Pattern matcher for s3 path
_URI_RE = re.compile(r's3://([^/]+)/?(.*)')
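# e.g. _URI_RE.match('s3://mybucket/some/key').groups() -> ('mybucket', 'some/key')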
class Command(Resource):
"""
qds_sdk.Command is the base Qubole command class. Different types of Qubole
commands can subclass this.
"""
"""all commands use the /commands endpoint"""
rest_entity_path = "commands"
@staticmethod
def is_done(status):
"""
Does the status represent a completed command
Args:
`status`: a status string
Returns:
True/False
"""
return status == "cancelled" or status == "done" or status == "error"
@staticmethod
def is_success(status):
return status == "done"
@classmethod
def create(cls, **kwargs):
"""
Create a command object by issuing a POST request to the /command endpoint
Note - this does not wait for the command to complete
Args:
`**kwargs`: keyword arguments specific to command type
Returns:
Command object
"""
conn = Qubole.agent()
if kwargs.get('command_type') is None:
kwargs['command_type'] = cls.__name__
if kwargs.get('tags') is not None:
kwargs['tags'] = kwargs['tags'].split(',')
return cls(conn.post(cls.rest_entity_path, data=kwargs))
@classmethod
def run(cls, **kwargs):
"""
Create a command object by issuing a POST request to the /command endpoint
Waits until the command is complete. Repeatedly polls to check status
Args:
`**kwargs`: keyword arguments specific to command type
Returns:
Command object
"""
cmd = cls.create(**kwargs)
while not Command.is_done(cmd.status):
time.sleep(Qubole.poll_interval)
cmd = cls.find(cmd.id)
return cmd
@classmethod
def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data)
def cancel(self):
"""
Cancels command represented by this object
"""
self.__class__.cancel_id(self.id)
@classmethod
def get_log_id(cls, id):
"""
Fetches log for the command represented by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/logs")
return r.text
def get_log(self):
"""
Fetches log for the command represented by this object
Returns:
The log as a string
"""
log_path = self.meta_data['logs_resource']
conn = Qubole.agent()
r = conn.get_raw(log_path)
return r.text
    @classmethod
    def get_jobs_id(cls, id):
"""
Fetches information about the hadoop jobs which were started by this
command id. This information is only available for commands which have
completed (i.e. Status = 'done', 'cancelled' or 'error'.) Also, the
cluster which ran this command should be running for this information
to be available. Otherwise only the URL and job_id is shown.
Args:
`id`: command id
"""
        conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/jobs")
return r.text
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
r = conn.get(result_path, {'inline': inline})
if r.get('inline'):
if sys.version_info < (3, 0, 0):
fp.write(r['results'].encode('utf8'))
else:
import io
if isinstance(fp, io.TextIOBase):
fp.buffer.write(r['results'].encode('utf8'))
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(r['results'].encode('utf8'))
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
acc = Account.find()
boto_conn = boto.connect_s3(aws_access_key_id=acc.storage_access_key,
aws_secret_access_key=acc.storage_secret_key)
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location']))
class HiveCommand(Command):
usage = ("hivecmd <submit|run> [options]")
optparser = GentleOptionParser(usage=usage)
optparser.add_option("-q", "--query", dest="query", help="query string")
optparser.add_option("-f", "--script_location", dest="script_location",
help="Path where hive query to run is stored. Can be S3 URI or local file path")
optparser.add_option("--macros", dest="macros",
help="expressions to expand macros used in query")
optparser.add_option("--tags", dest="tags",
help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
optparser.add_option("--sample_size", dest="sample_size",
help="size of sample in bytes on which to run query")
optparser.add_option("--cluster-label", dest="label",
help="the label of the cluster to run the command on")
optparser.add_option("--notify", action="store_true", dest="can_notify",
default=False, help="sends an email on command completion")
optparser.add_option("--name", dest="name",
help="Assign a name to this query")
optparser.add_option("--print-logs", action="store_true", dest="print_logs",
default=False, help="Fetch logs and print them to stderr.")
@classmethod
def parse(cls, args):
"""
Parse command line argu |
deisi/home-assistant | homeassistant/components/frontend/version.py | Python | mit | 226 | 0 | """DO NOT MODIFY. Auto-generated by build_frontend script."""
CORE = "7d80cc0e4dea6bc20fa2889be0b3cd15"
UI = "805f8dda70419b26daabc8e8f625127f"
MAP = "c922306de24140afd14f857f927bf8f0 | "
DEV = "b7079ac3121b95b9856e5603a6d8a263"
mozilla/firefox-flicks | flicks/videos/migrations/0020_auto__add_field_video2013_created.py | Python | bsd-3-clause | 7,284 | 0.008375 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Video2013.created'
db.add_column('videos_video2013', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 2, 5, 0, 0), auto_now_add=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Video2013.created'
db.delete_column('videos_video2013', 'created')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.award': {
'Meta': {'object_name': 'Award'},
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preview': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video2012']", 'null': 'True', 'blank': 'True'})
},
'videos.video2012': {
'Meta': {'object_name': 'Video2012'},
'bitly_link_db': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 28, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge_mark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'user_country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'views': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'videos.video2013': {
'Meta': {'object_name': 'Video2013'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 5, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField | ', [], {'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.model | s.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'user_notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'vimeo_id': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['videos'] |
T3CHNOLOG1C/Kurisu | addons/memes.py | Python | apache-2.0 | 7,307 | 0.001096 | import discord
from discord.ext import commands
from sys import argv
class Memes:
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
async def _meme(self, ctx, msg):
author = ctx.message.author
if ctx.message.channel.name[0:5] == "help-" or "assistance" in ctx.message.channel.name or (self.bot.nomemes_role in author.roles):
await self.bot.delete_message(ctx.message)
try:
await self.bot.send_message(author, "Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await self.bot.say(author.mention + " Meme commands are disabled in this channel, or your privileges have been revoked.")
else:
await self.bot.say(self.bot.escape_name(ctx.message.author.display_name) + ": " + msg)
# list memes
@commands.command(name="listmemes", pass_context=True)
async def _listmemes(self, ctx):
"""List meme commands."""
# this feels wrong...
funcs = dir(self)
msg = "```\n"
msg += ", ".join(func for func in funcs if func != "bot" and func[0] != "_")
msg += "```"
await self._meme(ctx, msg)
# 3dshacks memes
@commands.command(pass_context=True, hidden=True)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(pass_context=True, hidden=True)
async def screams(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/j0Dkv2Z.png")
@commands.command(pass_context=True, hidden=True)
async def eeh(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(pass_context=True, hidden=True)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/Sohsi8s.png")
@commands.command(pass_context=True, hidden=True)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/GMRp1dj.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/WLncIsi.gif")
@commands.command(pass_context=True, hidden=True)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/so8thgu.gifv")
@commands.command(pass_context=True, hidden=True)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/267IXh1.gif")
@commands.command(pass_context=True, hidden=True)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lKcsiBP.png")
@commands.command(pass_context=True, hidden=True)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/SIJzpau.gifv")
@commands.command(pass_context=True, hidden=True)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/zf2XrNk.gifv")
@commands.command(pass_context=True, hidden=True)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/dqh3fNi.png")
@commands.command(pass_context=True, hidden=True)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/sjQZKBF.gif")
@commands.command(pass_context=True, hidden=True)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(pass_context=True, hidden=True)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(pass_context=True, hidden=True)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ARsOh3p.jpg")
@commands.command(pass_context=True, hidden=True)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsJ191C.png")
@commands.command(pass_context=True, hidden=True)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/tnWSXf7.png")
@commands.command(pass_context=True, hidden= | True)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ZPMveve.jpg")
@commands.command(pass_context=True, hidde | n=True)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/A59RbRT.png")
@commands.command(pass_context=True, hidden=True)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/UYbIZYs.gifv")
@commands.command(pass_context=True, hidden=True)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/bgvuHAd.png")
@commands.command(pass_context=True, hidden=True)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vpu8bX3.png")
@commands.command(pass_context=True, hidden=True)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
# Cute commands :3
@commands.command(pass_context=True, hidden=True)
async def headpat(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(pass_context=True, hidden=True)
async def headpat2(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/djhHX0n.gifv")
@commands.command(pass_context=True, hidden=True)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/VHlIZRC.png")
@commands.command(pass_context=True, hidden=True)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/OyjCHNe.png")
@commands.command(pass_context=True, hidden=True)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lw80tT0.gif")
@commands.command(pass_context=True, hidden=True)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/RQeZErU.png")
@commands.command(pass_context=True, hidden=True)
async def dev(self, ctx):
"""Reminds user where they are."""
await self.bot.say("You seem to be in <#196635781798952960>.")
@commands.command(pass_context=True, hidden=True)
async def headrub(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/j6xSoKv.jpg")
@commands.command(pass_context=True, hidden=True)
async def blackalabi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/JzFem4y.png")
@commands.command(pass_context=True, hidden=True)
async def nom(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/p1r53ni.jpg")
# Load the extension
def setup(bot):
bot.add_cog(Memes(bot))
|
2014c2g12/c2g12 | wsgi/w2/c2_w2.py | Python | gpl-2.0 | 9,606 | 0.005416 |
########################### 1. Import required modules
import cherrypy
import os
########################### 2. Set up local and remote directories
# Determine the directory containing this script; on Windows it ends with a trailing backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # the program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # the program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
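# Note (illustrative): on OpenShift, OPENSHIFT_DATA_DIR points at the app's
# persistent storage (the exact path is deployment-specific, e.g. something
# like /var/lib/openshift/<app-id>/app-root/data/); locally everything is
# kept under ./local_data/ next to this script.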
########################### 3. Create the main object
class HelloWorld(object):
_cp_config = {
# if there is no utf-8 encoding, no Chinese input available
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
'tools.sessions.locking' : 'explicit',
'tools.sessions.storage_path' : data_dir+'/tmp',
# session timeout is 60 minutes
'tools.sessions.timeout' : 60
}
@cherrypy.expose
def fileuploadform(self):
return '''<h1>file upload</h1>
<script src="/static/jquery.js" type="text/javascript"></script>
<script src="/static/axuploader.js" type="text/javascript"></script>
<script>
$(document).ready(function(){
$('.prova').axuploader({url:'/fileaxupload', allowExt:['jpg','png','gif','7z','pdf','zip','flv','stl','txt'],
finish:function(x,files)
{
alert('All files have been uploaded: '+files);
},
enable:true,
remotePath:function(){
return 'downloads/';
}
});
});
</script>
<div class="prova"></div>
<input type="button" onclick="$('.prova').axuploader('disable')" value="asd" />
<input type="button" onclick="$('.prova').axuploader('enable')" value="ok" />
</section></body></html>
'''
@cherrypy.expose
def brythonuploadform(self):
return '''<h1>file upload</h1>
<script type="text/javascript" src="/static/Brython2.0.0-20140209-164925/brython.js"></script>
<script type="text/javascript" >
function getradio(tagname){
var radios = document.getElementsByName(tagname);
for (var i = 0, length = radios.length; i < length; i++) {
if (radios[i].checked) {
// do whatever you want with the checked radio
return radios[i].value;
// only one radio can be logically checked, don't check the rest
break;
}
}
}
function run_js(){
var cons = document.getElementById("console")
var jscode = cons.value
var t0 = (new Date()).getTime()
eval(jscode)
var t1 = (new Date()).getTime()
console.log("Javascript code run in "+(t1-t0)+" ms")
}
</script>
<script type="text/python3" src="/static/editor.py"></script>
<script type="text/python3">
from browser import doc
overwrite = 0
# add delete_program 1/7, seven steps to complete the ajax task, the last step is to add delete_program function on server
# delete1 and delete2 parameters are also added into save_program function.
delete1 = 0
delete2 = 0
def set_debug(ev):
if ev.target.checked:
__BRYTHON__.debug = 1
else:
__BRYTHON__.debug = 0
def set_overwrite(ev):
global overwrite
if ev.target.checked:
overwrite = 1
else:
overwrite = 0
# add delete_program 2/7, client side add set_delete1 and set_delete2 functions.
def set_delete1(ev):
global delete1
if ev.target.checked:
delete1 = 1
else:
delete1 = 0
def set_delete2(ev):
global delete2
if ev.target.checked:
delete2 = 1
else:
delete2 = 0
#### ajax process
from browser import ajax,doc
def on_complete(req):
print(req.readyState)
print('status',req.status)
if req.status==200 or req.status==0:
# show request text on id=result division
doc["result"].html = req.text
else:
doc["result"].html = "error "+req.text
def err_msg():
doc["result"].html = "server didn't reply after %s seconds" %timeout
timeout = 4
def go(url):
req = ajax.ajax()
req.bind('complete', on_complete)
req.set_timeout(timeout, err_msg)
req.open('GET', url, True)
req.send()
def post(url):
req = ajax.ajax()
req.bind('complete', on_complete)
req.set_timeout(timeout, err_msg)
req.open('POST', url, True)
req.set_header('content-type','application/x-www-form-urlencoded')
# doc["filename"].value is the id=filename input field's value
# editor.getValue() is the content on editor, need to send dictionary format data
# while post url, need to save editor content into local_storage to use the previous load javascripts
storage["py_src"] = editor.getValue()
# add delete_program 3/7, two parameters added, this will also affect save_program function on server.
req.send({'filename':doc["filename"].value, 'editor':editor.getValue(), 'overwrite':overwrite, 'delete1':delete1, 'delete2':delete2})
# get program from server
| def get_prog(ev):
# ajax can only read data from server
_nam | e = '/brython_programs/'+doc["filename"].value
try:
editor.setValue(open(_name, encoding="utf-8").read())
doc["result"].html = doc["filename"].value+" loaded!"
except:
doc["result"].html = "can not get "+doc["filename"].value+"!"
editor.scrollToRow(0)
editor.gotoLine(0)
reset_theme()
def get_radio(ev):
from javascript import JSObject
filename = JSObject(getradio)("filename")
# ajax can only read data from server
doc["filename"].value = filename
_name = '/brython_programs/'+filename
editor.setValue(open(_name, encoding="utf-8").read())
doc["result"].html = filename+" loaded!"
editor.scrollToRow(0)
editor.gotoLine(0)
reset_theme()
# bindings
doc['run_js'].bind('click',run_js)
doc['set_debug'].bind('change',set_debug)
doc['set_overwrite'].bind('change',set_overwrite)
# add delete_program 4/7, two associated binds added
doc['set_delete1'].bind('change',set_delete1)
doc['set_delete2'].bind('change',set_delete2)
# next functions are defined in editor.py
doc['show_js'].bind('click',show_js)
doc['run'].bind('click',run)
doc['show_console'].bind('click',show_console)
# get_prog and get _radio (working)
doc['get_prog'].bind('click', get_prog)
doc['get_radio'].bind('click', get_radio)
# reset_the_src and clear_console (working)
doc['reset_the_src'].bind('click',reset_the_src)
doc['clear_console'].bind('click',clear_console)
# clear_canvas and clear_src
doc['clear_canvas'].bind('click',clear_canvas)
doc['clear_src'].bind('click',clear_src)
# only admin can save program to server
doc['save_program'].bind('click',lambda ev:post('/save_program'))
# add delete_program 5/7, delete_program button bind to execute delete_program on server.
doc['delete_program'].bind('click',lambda ev:post('/delete_program'))
</script>
<script type="text/javascript">
window.onload=brython({debug:1, cache:'version'});
</script>
<div class="prova"></div>
<input type="button" onclick="$('.prova').axuploader('disable')" value="asd" />
<input type="button" onclick="$('.prova').axuploader('enable')" value="ok" />
</section></body></html>
'''
@cherrypy.expose
def fileaxupload(self, *args, **kwargs):
filename = kwargs["ax-file-name"]
flag = kwargs["start"]
        # Finally found the bug: values read from kwargs[] are strings, not numbers, so the earlier check flag == 0 never matched
if flag == "0":
            # starting the upload from byte 0 means a new file must be opened
file = open(download_root_dir+"downloads/"+filename, "wb")
else:
file = open(download_root_dir+"downloads/"+filename, "ab")
file.write(cherrypy.request.body.read())
file.close()
|
keras-team/keras | keras/initializers/__init__.py | Python | apache-2.0 | 7,577 | 0.007523 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer serialization / deserialization."""
import tensorflow.compat.v2 as tf
import threading
from tensorflow.python import tf2
from keras.initializers import initializers_v1
from keras.initializers import initializers_v2
from keras.utils import generic_utils
from keras.utils import tf_inspect as inspect
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import keras_export
# LOCAL.ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
"""Populates dict ALL_OBJECTS with every built-in initializer.
"""
global LOCAL
if not hasattr(LOCAL, 'ALL_OBJECTS'):
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = None
if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled():
# Objects dict is already generated for the proper TF version:
# do nothing.
return
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
# Compatibility aliases (need to exist in both V1 and V2).
LOCAL.ALL_OBJECTS['ConstantV2'] = initializers_v2.Constant
LOCAL.ALL_OBJECTS['GlorotNormalV2'] = initializers_v2.GlorotNormal
LOCAL.ALL_OBJECTS['GlorotUniformV2'] = initializers_v2.GlorotUniform
LOCAL.ALL_OBJECTS['HeNormalV2'] = initializers_v2.HeNormal
LOCAL.ALL_OBJECTS['HeUniformV2'] = initializers_v2.HeUniform
LOCAL.ALL_OBJECTS['IdentityV2'] = initializers_v2.Identity
LOCAL.ALL_OBJECTS['LecunNormalV2'] = initializers_v2.LecunNormal
LOCAL.ALL_OBJECTS['LecunUniformV2'] = initializers_v2.LecunUniform
LOCAL.ALL_OBJECTS['OnesV2'] = initializers_v2.Ones
L | OCAL.ALL_OBJECTS['OrthogonalV2'] = initializers_v2.Orthogonal
LOCAL.ALL_OBJECTS['RandomNo | rmalV2'] = initializers_v2.RandomNormal
LOCAL.ALL_OBJECTS['RandomUniformV2'] = initializers_v2.RandomUniform
LOCAL.ALL_OBJECTS['TruncatedNormalV2'] = initializers_v2.TruncatedNormal
LOCAL.ALL_OBJECTS['VarianceScalingV2'] = initializers_v2.VarianceScaling
LOCAL.ALL_OBJECTS['ZerosV2'] = initializers_v2.Zeros
# Out of an abundance of caution we also include these aliases that have
# a non-zero probability of having been included in saved configs in the past.
LOCAL.ALL_OBJECTS['glorot_normalV2'] = initializers_v2.GlorotNormal
LOCAL.ALL_OBJECTS['glorot_uniformV2'] = initializers_v2.GlorotUniform
LOCAL.ALL_OBJECTS['he_normalV2'] = initializers_v2.HeNormal
LOCAL.ALL_OBJECTS['he_uniformV2'] = initializers_v2.HeUniform
LOCAL.ALL_OBJECTS['lecun_normalV2'] = initializers_v2.LecunNormal
LOCAL.ALL_OBJECTS['lecun_uniformV2'] = initializers_v2.LecunUniform
if tf.__internal__.tf2.enabled():
# For V2, entries are generated automatically based on the content of
# initializers_v2.py.
v2_objs = {}
base_cls = initializers_v2.Initializer
generic_utils.populate_dict_with_module_objects(
v2_objs,
[initializers_v2],
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))
for key, value in v2_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
else:
# V1 initializers.
v1_objs = {
'Constant': tf.compat.v1.constant_initializer,
'GlorotNormal': tf.compat.v1.glorot_normal_initializer,
'GlorotUniform': tf.compat.v1.glorot_uniform_initializer,
'Identity': tf.compat.v1.initializers.identity,
'Ones': tf.compat.v1.ones_initializer,
'Orthogonal': tf.compat.v1.orthogonal_initializer,
'VarianceScaling': tf.compat.v1.variance_scaling_initializer,
'Zeros': tf.compat.v1.zeros_initializer,
'HeNormal': initializers_v1.HeNormal,
'HeUniform': initializers_v1.HeUniform,
'LecunNormal': initializers_v1.LecunNormal,
'LecunUniform': initializers_v1.LecunUniform,
'RandomNormal': initializers_v1.RandomNormal,
'RandomUniform': initializers_v1.RandomUniform,
'TruncatedNormal': initializers_v1.TruncatedNormal,
}
for key, value in v1_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
# More compatibility aliases.
LOCAL.ALL_OBJECTS['normal'] = LOCAL.ALL_OBJECTS['random_normal']
LOCAL.ALL_OBJECTS['uniform'] = LOCAL.ALL_OBJECTS['random_uniform']
LOCAL.ALL_OBJECTS['one'] = LOCAL.ALL_OBJECTS['ones']
LOCAL.ALL_OBJECTS['zero'] = LOCAL.ALL_OBJECTS['zeros']
# For backwards compatibility, we populate this file with the objects
# from ALL_OBJECTS. We make no guarantees as to whether these objects will
# be using their correct version.
populate_deserializable_objects()
globals().update(LOCAL.ALL_OBJECTS)
# Utility functions
@keras_export('keras.initializers.serialize')
def serialize(initializer):
return generic_utils.serialize_keras_object(initializer)
@keras_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
"""Return an `Initializer` object from its config."""
populate_deserializable_objects()
return generic_utils.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name='initializer')
@keras_export('keras.initializers.get')
def get(identifier):
"""Retrieve a Keras initializer by the identifier.
The `identifier` may be the string name of a initializers function or class (
case-sensitively).
>>> identifier = 'Ones'
>>> tf.keras.initializers.deserialize(identifier)
<...keras.initializers.initializers_v2.Ones...>
You can also specify `config` of the initializer to this function by passing
dict containing `class_name` and `config` as an identifier. Also note that the
`class_name` must map to a `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> tf.keras.initializers.deserialize(cfg)
<...keras.initializers.initializers_v2.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
Initializer instance base on the input identifier.
Raises:
ValueError: If the input identifier is not a supported type or in a bad
format.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, str):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
if inspect.isclass(identifier):
identifier = identifier()
return identifier
else:
raise ValueError('Could not interpret initializer identifier: ' +
str(identifier))
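# Illustrative round trip (a hedged sketch mirroring the doctests above):
#   init = get('glorot_uniform')   # look up by snake_case alias
#   cfg = serialize(init)          # -> {'class_name': ..., 'config': {...}}
#   init2 = deserialize(cfg)       # reconstruct an equivalent initializer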
|
Micronaet/micronaet-addons-private | task_manager/wizard/wizard_report.py | Python | agpl-3.0 | 11,463 | 0.015354 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (C) 2004-2012 Micronaet srl. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
import netsvc
class intervent_report_collection_wizard(osv.osv_memory):
    ''' Wizard that permits:
        selecting the type of report
        filtering elements by some value (user, customer, etc.)
        The wizard returns the selected aeroo report.
    '''
_name = "intervent.report.collection.wizard"
# On change function:
def on_change_month(self, cr, uid, ids, month, context=None):
'''
'''
import time
res={'value':{}}
if month:
            res['value'] = {
                'from_date': "%s-%02d-01 00:00:00" % (time.strftime('%Y'), int(month)),
                'to_date': "%04d-%02d-01 00:00:00" % (
                    int(time.strftime('%Y')) if month != "12" else int(time.strftime('%Y')) + 1,
                    int(month) + 1 if month != "12" else 1,
                ),
            }
return res
# Button function of the wizard:
# Extra function usefull:
def _get_action_report_to_return(self, cr, uid, ids, intervent_ids, report_name, title, context=None):
''' Compose action to return according to passed: intervent_ids, report_name, title
'''
datas = {'ids': intervent_ids,}
datas['model'] = 'intervention.report'
datas['form'] = self.read(cr, uid, ids)[0]
datas['title']= title
# return action:
return {
'type': 'ir.actions.report.xml',
'report_name': report_name, # TODO one for all??
'datas': datas,
}
def _get_filter_from_wizard(self, cr, uid, ids, with_partner=True, context=None):
''' In wizard passed get the filter selected in normal domain format
the with_partner parameter is used for parametrize this function
for filter report or for load all partner of the filtered values
(in this case, obviously there's no selection...)
'''
if context is None:
context = {}
wiz_proxy = self.browse(cr, uid, ids)[0]
domain=[]
if wiz_proxy.partner_id and with_partner:
domain += [('partner_id','=',wiz_proxy.partner_id.id)]
if wiz_proxy.user_id:
domain += [('user_id','=',wiz_proxy.user_id.id)]
if wiz_proxy.from_date:
domain += [('date_start','>=',"%s %s"%(wiz_proxy.from_date, "00:00:00"))]
if wiz_proxy.to_date:
domain += [('date_start','<',"%s %s"%(wiz_proxy.to_date, "00:00:00"))]
if wiz_proxy.is_intervent: # only intervent
domain += [('state','in', ('confirmed','closed','reported'))]
if wiz_proxy.is_closed: # only intervent
domain += [('state','in', ('closed','reported'))]
return domain
def _get_report_parameter_for_action(self, group_by):
''' Return report parameter: (report_name, title, order)
according to group_by clause passed
'''
        if group_by == 'state': # group by state
            return ("intervent_report_state", "Intervent report list (group by state)", "partner_id,ref,date_start",)
        elif group_by == 'tipology': # group by tipology
            return ("intervent_report_tipology", "Intervent report list (group by tipology)", "tipology_id,date_start",)
        elif group_by == 'partner': # group by partner
            return ("intervent_report_partner", "Intervent report list (group by partner)", "partner_id,date")
        elif group_by == 'list': # plain list
            return ("intervent_report", "Intervent report list", "partner_id,date_start")
        else: # no report (should not happen)
            return (False, False, False) # communicate error
def print_load_partner(self, cr, uid, ids, context=None):
''' Test filter selected and get partner list for intervent
'''
domain=self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
intervent_ids = self.pool.get('intervention.report').search(cr, uid, domain, context=context)
intervent_proxy = self.pool.get('intervention.report').browse(cr, uid, intervent_ids, context=context)
partner_ids=[]
for intervent in intervent_proxy:
if intervent.partner_id and intervent.partner_id.id not in partner_ids:
partner_ids.append(intervent.partner_id.id)
if intervent_ids:
# write new list of elements
self.write(cr, uid, ids, {'partner_ids': [(6, 0, partner_ids)]})
return True
def print_save_partner_report(self, cr, uid, ids, context=None):
        ''' Save partner reports (state and intervent) for the period.
            For each (preloaded) partner, save two reports:
            the status report and the intervent report.
        '''
import time, base64, xmlrpclib
user_proxy=self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]
db=cr.dbname
uid=uid
pwd=user_proxy.password
model = 'intervention.report'
#report_name="intervent_report"
report_to_print={}
report_to_print['state'] = self._get_report_parameter_for_action('state')
report_to_print['list'] = self._get_report_parameter_for_action('l | ist')
wiz_proxy=self.browse(cr, uid, ids)[0]
printsock = xmlrpclib.ServerProxy('http://loc | alhost:8069/xmlrpc/report')
#domain = self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
for partner in wiz_proxy.partner_ids: #self.pool.get('intervention.report').search(cr, uid, domain, order=order, context=context)
# get intervent_ids for this partner
domain = self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
domain += [('partner_id', '=', partner.id)]
for key in report_to_print.keys():
(report_name, title, order) = report_to_print[key]
intervent_ids = self.pool.get(model).search(cr, uid, domain, order=order, context=context)
#self._get_action_report_to_return(cr, uid, ids, intervent_ids, report_name, title, context=context)
if intervent_ids:
action=self._get_action_report_to_return(cr, uid, ids, intervent_ids, report_name, title, context=context)
action['report_type']='pdf'
action['model']=model
id_report = printsock.report(db, uid, pwd, report_name, intervent_ids, action)#ids {'model': model, 'report_type':'pdf'})
time.sleep(5)
state = False
attempt = 0
while not state:
report = printsock.report_get(db, uid, pwd, id_report)
state = report['state']
if not state:
time.sleep(1)
attempt += 1
if attempt>200:
print 'Pri |
rwl/PyCIM | CIM14/IEC61968/Customers/CustomerAccount.py | Python | mit | 3,833 | 0.002087 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61968.Common.Document import Document
class CustomerAccount(Document):
"""Assignment of a group of products and services purchased by the Customer through a CustomerAgreement, used as a mechanism for customer billing and payment. It contains common information from the various types of CustomerAgreements to create billings (invoices) for a Customer and receive payment.
"""
def __init__(self, PaymentTransactions=None, CustomerAgreements=None, *args, **kw_args):
"""Initialises a new 'CustomerAccount' instance.
@param PaymentTransactions: All payment transactions for this customer account.
@param CustomerAgreements: All agreements for this customer account.
"""
self._PaymentTransactions = []
self.PaymentTransactions = [] if PaymentTransactions is None else PaymentTransactions
self._CustomerAgreements = []
self.CustomerAgreements = [] if CustomerAgreements is None else CustomerAgreements
super(CustomerAccount, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["PaymentTr | ansactions", "CustomerAgreem | ents"]
_many_refs = ["PaymentTransactions", "CustomerAgreements"]
def getPaymentTransactions(self):
"""All payment transactions for this customer account.
"""
return self._PaymentTransactions
def setPaymentTransactions(self, value):
for x in self._PaymentTransactions:
x.CustomerAccount = None
for y in value:
y._CustomerAccount = self
self._PaymentTransactions = value
PaymentTransactions = property(getPaymentTransactions, setPaymentTransactions)
def addPaymentTransactions(self, *PaymentTransactions):
for obj in PaymentTransactions:
obj.CustomerAccount = self
def removePaymentTransactions(self, *PaymentTransactions):
for obj in PaymentTransactions:
obj.CustomerAccount = None
def getCustomerAgreements(self):
"""All agreements for this customer account.
"""
return self._CustomerAgreements
def setCustomerAgreements(self, value):
for x in self._CustomerAgreements:
x.CustomerAccount = None
for y in value:
y._CustomerAccount = self
self._CustomerAgreements = value
CustomerAgreements = property(getCustomerAgreements, setCustomerAgreements)
def addCustomerAgreements(self, *CustomerAgreements):
for obj in CustomerAgreements:
obj.CustomerAccount = self
def removeCustomerAgreements(self, *CustomerAgreements):
for obj in CustomerAgreements:
obj.CustomerAccount = None
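# Illustrative usage sketch (not part of the original module): the setters
# keep both ends of each association in sync, so attaching an agreement also
# registers this account on the agreement. `CustomerAgreement` is the CIM
# class referenced above, imported from its own module.
#
#   account = CustomerAccount()
#   agreement = CustomerAgreement()
#   account.addCustomerAgreements(agreement)
#   assert agreement.CustomerAccount is account
#   assert agreement in account.CustomerAgreements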
|
sveinugu/gtrackcore | gtrackcore/input/core/GenomeElement.py | Python | gpl-3.0 | 7,445 | 0.009268 | from gtrackcore.track.core.GenomeRegion import GenomeRegion
from gtrackcore.util.CommonConstants import BINARY_MISSING_VAL
from gtrackcore.util.CommonFunctions import isNan
from gtrackcore.util.CustomExceptions import NotSupportedError
class GenomeElement(GenomeRegion):
@staticmethod
def createGeFromTrackEl(trackEl, tf, globalCoords=True):
genomeAnchor = trackEl._trackView.genomeAnchor
genome = genomeAnchor.genome
start = None if (tf.isDense() and tf.isInterval()) else trackEl.start()
end = None if (not tf.isInterval() and not tf.isDense()) else trackEl.end()
edges = trackEl.edges()[trackEl.edges() != ''] if trackEl.edges() is not None else None
weights = trackEl.weights()[trackEl.edges() != ''] if trackEl.weights() is not None else None
if globalCoords:
chr = genomeAnchor.chr
if start is not None:
start += genomeAnchor.start
if end is not None:
end += genomeAnchor.start
else:
chr = str(genomeAnchor)
return GenomeElement(genome, chr, start, end, trackEl.val(), trackEl.strand(), \
id=trackEl.id(), edges=edges, weights=weights, \
extra=dict([(key, getattr(trackEl, key)()) for key in trackEl.getAllExtraKeysInOrder()]), \
orderedExtraKeys=trackEl.getAllExtraKeysInOrder())
def __init__(self, genome=None, chr=None, start=None, end=None, val=None, strand=None, id=None, edges=None, weights=None, extra=None, orderedExtraKeys=None, isBlankElement=False, **kwArgs):
# __dict__ is used for speedup, so that __setattr__ is not called
members = self.__dict__
members['genome'] = genome
members['chr'] = chr #sequence id (string)
members['start'] = start #start posision (int, 0-indexed)
members['end'] = end #end position (int, 0-indexed, end-exclusive)
members['val'] = val #value (float (number), string (category (n>1) or character (n=1)), int (1 for case, 0 for control, -1 for missing) or lists of the same)
members['strand'] = strand #DNA strand (int, 1 for '+', 0 for '-', -1 for missing)
members['id'] = id #unique id (string)
members['edges'] = edges #ids of linked elements (list of strings)
members['weights'] = weights #resp. weights of edges (list of values, using similar types as for 'value' above)
members['isBlankElement'] = isBlankElement
if extra is None:
members['orderedExtraKeys'] = [] #keys in extra dict in correct order. Is used instead of OrderedDict because of performance issues
members['extra'] = {}
else:
if orderedExtraKeys is None:
members['orderedExtraKeys'] = extra.keys()
else:
members['orderedExtraKeys'] = orderedExtraKeys
members['extra'] = dict(extra) #dict of extra columns, from column name (str) -> contents (str)
for kw in kwArgs:
members['orderedExtraKeys'].append(kw)
members['extra'][kw] = kwArgs[kw]
def __copy__(self):
raise NotSupportedError('Shallow copy.copy() of GenomeElement objects is not supported, '
'as this produces unwanted effects. Please use instance method '
'getCopy() or copy.deepcopy() instead. getCopy() is by far the '
'most efficient of the two.')
def getCopy(self):
extraCopy = dict(self.extra)
orderedExtraKeysCopy = list(self.orderedExtraKeys)
return GenomeElement(self.genome, self.chr, self.start, self.end, self.val, self.strand, self.id, self.edges, self.weights, extraCopy, orderedExtraKeysCopy)
def __getattr__(self, item):
try:
return self.__dict__['extra'][item]
except KeyError:
raise AttributeError
def __setattr__(self, item, value):
if item not in self.__dict__ and 'extra' in self.__dict__:
if item not in self.__dict__['extra']:
self.__dict__['orderedExtraKeys'].append(item)
self.__dict__['extra'][item] = value
else:
object.__setattr__(self, item, value)
def __str__(self):
#return self.toStr()
#self.start+1 because we want to show 1-indexed, end inclusive output
return (str(self.chr) + ':' if not self.chr is None else '')\
+ (str(self.start+1) if not self.start is None else '')\
+ ('-' + str(self.end) if not self.end is None else '')\
+ ((' (Pos)' if self.strand else ' (Neg)') if not self.strand in [None, BINARY_MISSING_VAL] else '')\
+ ((' [' + str(self.val) + ']') if se | lf.val is not None else '')
def __repr__(self):
return str(self)
def toStr(self):
#self.start+1 because we want to show 1-indexed, end inclu | sive output
return (str(self.genome) + ':' if not self.genome is None else '')\
+ (str(self.chr) + ':' if not self.chr is None else '')\
+ (str(self.start+1) if not self.start is None else '')\
+ ('-' + str(self.end) if not self.end is None else '')\
+ ((' (Pos)' if self.strand else ' (Neg)') if not self.strand in [None, BINARY_MISSING_VAL] else '')\
+ ((' [' + str(self.val) + ']') if self.val != None else '')\
+ ((' id="%s"' % self.id) if self.id != None else '')\
+ ((' edges="%s"' % str(self.edges)) if self.edges != None else '')\
+ ((' weights="%s"' % str(self.weights)) if self.weights != None else '')\
+ ((' extra="%s"' % str(self.extra)) if self.extra != {} else '')
def __cmp__(self, other):
if other is None:
return -1
else:
#print self.toStr()
#print other.toStr()
#print [cmp(getattr(self, attr), getattr(other, attr)) for attr in ['genome','chr','start','end','val','strand','id','edges','weights','extra']]
try:
return cmp([self.genome, self.chr, self.start, self.end, self.val, self.strand, self.id, self.edges, self.weights, self.extra] , \
[other.genome, other.chr, other.start, other.end, other.val, other.strand, other.id, other.edges, other.weights, other.extra])
except:
if isinstance(other, GenomeRegion):
return GenomeRegion.__cmp__(self, other)
def overlaps(self, other):
assert all((getattr(self, attr) is None) == (getattr(other, attr) is None) \
for attr in ['genome', 'chr', 'start', 'end'])
if self.reprIsDense():
return False
if self.genome is not None:
if self.genome != other.genome:
return False
if self.chr != other.chr:
return False
if not None in [self.start, self.end]:
return False if self.start >= other.end or self.end <= other.start else True
else:
return True if (self.start is not None and self.start == other.start) or \
(self.end is not None and self.end == other.end) else False
def reprIsDense(self):
return self.start is None and self.end is None
def validAsRegion(self):
return not None in [self.genome, self.chr, self.start, self.end]
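# Illustrative sketch (hedged; coordinates are 0-indexed and end-exclusive):
#   a = GenomeElement('hg18', 'chr1', 10, 20)
#   b = GenomeElement('hg18', 'chr1', 15, 30)
#   a.overlaps(b)        # -> True: [10, 20) and [15, 30) share 15..19
#   a.myAnnotation = 'x' # unknown attributes land in the 'extra' dict
#   a.extra              # -> {'myAnnotation': 'x'} ('myAnnotation' is a
#                        #    hypothetical key, just for illustration)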
|
gw0/pelican-plugins | video_privacy_enhancer/video_privacy_enhancer.py | Python | agpl-3.0 | 7,716 | 0.008683 | """
Video Privacy Enhancer
--------------------------
Authored by Jacob Levernier, 2014
Released under the GNU AGPLv3
For more information on this plugin, please see the attached Readme.md file.
"""
"""
SETTINGS
"""
# Do not use a leading or trailing slash below (e.g., use "images/video-thumbnails"):
output_directory_for_thumbnails = "images/video-thumbnails"
"""
In order for this plugin to work optimally, you need to do just a few things:
1. Enable the plugin in pelicanconf.py (see http://docs.getpelican.com/en/3.3.0/plugins.html for documentation):
PLUGIN_PATH = "/pelican-plugins"
PLUGINS = ["video_privacy_enhancer"]
2a. If necessary, install jQuery on your site (see https://stackoverflow.com/questions/1458349/installing-jquery -- the jQuery base file should go into your Pelican theme's 'static' directory)
2b. Copy the jQuery file in this folder into, for example, your_theme_folder/static/video_privacy_enhancer_jQuery.js, and add a line like this to the <head></head> element of your website's base.html (or equivalent) template:
`<script src="{{ SITEURL }}/theme/video_privacy_enhancer_jquery.js"></script> <!--Load jQuery functions for the Video Privacy Enhancer Pelican plugin -->`
3. Choose a default video embed size and add corresponding CSS to your theme's CSS file:
Youtube allows the following sizes in its embed GUI (as of this writing, in March 2014). I recommend choosing one, and then having the iframe for the actual video embed match it (so that it's a seamless transition). This can be handled with CSS in both cases, so I haven't hard-coded it here:
1280 W x 720 H
853 W x 480 H
640 W x 360 H
560 W x 315 H
Here's an example to add to your CSS file:
```
/* For use with the video-privacy-enhancer Pelican plugin */
img.youtube-embed-dummy-image,
iframe.embedded_youtube_video {
width: 853px;
max-height: 480px;
/* Center the element on the screen */
display: block;
margin-top: 2em;
margin-bottom: 2em;
margin-left: auto;
margin-right: auto;
}
iframe.embedded_youtube_video {
width: 853px;
height: 480px;
}
```
"""
"""
END SETTINGS
"""
from pelican import signals # For making this plugin work with Pelican.
import os.path # For checking whether files are present in the filesystem.
import re # For using regular expressions.
import urllib # For downloading the video thumbnails.
import logging
logger = logging.getLogger(__name__) # For using logger.debug() to log errors or other notes.
# A function to check whtether output_directory_for_thumbnails (a variable set above in the SETTINGS section) exists. If it doesn't exist, we'll create it.
def check_for_thumbnail_directory(pelican_output_path):
# Per http://stackoverflow.com/a/84173, check if a file exists. isfile() works on files, and exists() works on files and directories.
try:
if not os.path.exists(pelican_output_path + "/" + output_directory_for_thumbnails): # If the directory doesn't exist already...
os.makedirs(pelican_output_path + "/" + output_directory_for_thumbnails) # Create the directory to hold the video thumbnails.
return True
except:
print logger.debug("Error in checking if thumbnail folder exists and making the directory if it doesn't.") # In case something goes wrong.
return False
# A function to download the video thumbnail from YouTube (currently the only supported video platform):
def download_thumbnail(video_id_from_shortcode, pelican_output_path):
# Check if the thumbnail directory exists already:
check_for_thumbnail_directory(pelican_output_path)
# Check if the thumbnail for this video exists already (if it's been previously downloaded). If it doesn't, download it:
if not os.path.exists(pelican_output_path + "/" + output_directory_for_thumbnails + "/" + video_id_from_shortcode + ".jpg"): # If the thumbnail doesn't already exist...
urllib.urlretrieve("https://img.youtube.com/vi/" + video_id_from_shortcode + "/0.jpg", pelican_output_path + "/" + output_directory_for_thumbnails + "/" + video_id_from_shortcode + ".jpg") # Download the thumbnail. This follows the instructions at http://www.reelseo.com/youtube-thumbnail-image/ for downloading YouTube thumbnail images.
# A function to read through each page and post as it comes through from Pelican, find all instances of `!youtube(...)`, and change it into an HTML <img> element with the video thumbnail.
def process_youtube_shortcodes(data_passed_from_pelican):
if data_passed_from_pelican._content: # If the item passed from Pelican has a "content" attribute (i.e., if it's not an image file or something else like that). NOTE: data_passed_from_pelican.content (without an underscore in front of 'content') seems to be read-only, whereas data_passed_from_pelican._content is able to be overwritten. This is somewhat explained in an IRC log from 2013-02-03 from user alexis to user webdesignhero_ at https://botbot.me/freenode/pelican/2013-02-01/?tz=America/Los_Angeles.
full_content_of_page_or_post = data_passed_from_pelican._content
else:
return # Exit the function, essentially passing over the (non-text) file.
all_instances_of_the_youtube_shortcode = re.findall('\!youtube.*?\)', full_content_of_page_or_post) # Use a regular expression to find every instance of '!youtube' followed by anything up to the first matching ')'.
if(len(all_instances_of_the_youtube_shortcode) > 0): # If the article/page HAS any shortcodes, go on. Otherwise, don't (to do so would inadvertantly wipe out the output content for that article/page).
replace_shortcode_in_text = "" # This just gives this an initial value before going into the loop below.
# Go through each shortcode instance that we found above, and parse it:
for youtube_shortcode_to_parse in all_instances_of_the_youtube_shortcode:
video_id_from_shortcode = re.findall('(?<=youtube\().*?(?=\))', youtube_shortcode_to_parse)[0] # Get what's inside of the parentheses in '!youtube(...).'
# print "Video ID is " + video_id_from_shortcode # Good for debugging purposes.
# Use the Pelican pelicanconf.py settings:
pelican_output_path = data_passed_from_pelican.settings['OUTPUT_PATH']
pelican_site_url = data_passed_from_pelican.settings['SITEURL']
# Download the video thumbnail if it's not already on the filesystem:
download_thumbnail(video_id_from_shortcode, pelican_output_path)
# Replace '!youtube(...)' with '<img>...</img>'. Note that the <img> is given a class that the jQuery file mentioned at the top of this file will watch over. Any time an image with that class is clicked, the jQuery function will trigger and turn it into the full video embed.
replace_shortcode_in_text = re.sub(r'\!youtube\(' + video_id_from_shortcode + '\)', r'<img class="youtube-embed-dummy-image" id="' + video_id_from_shortcode + '" src="' + pelican_site_url + '/' + output_directory_for_thumbnails + '/' + video_id_from_shortcode + '.jpg" alt="Embedded Video - Click to view" title="Embedded Video - Click to view"></img>', full_content_of_page_or_post)
# Replace the content of the page or post with our now-updated content (having gone through all instances of the !youtube() shortcode and updated them all, exiting the loop above.
data_passed_from_pelican._content = replace_shortcode_in_text
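# For reference (the video id below is only an illustrative placeholder): a
# shortcode like `!youtube(dQw4w9WgXcQ)` in an article body is rewritten into
#   <img class="youtube-embed-dummy-image" id="dQw4w9WgXcQ"
#        src="<SITEURL>/images/video-thumbnails/dQw4w9WgXcQ.jpg"
#        alt="Embedded Video - Click to view" ...></img>
# and the bundled jQuery handler swaps that image for the real iframe on click.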
# Make Pelican work (see http://docs.getpelican.com/en/3.3.0/plugins.html#how-to-create-plugins):
def register():
signals.content_object_init.connect(process_youtube_shortcodes)
|
rosmo/ansible | lib/ansible/modules/files/patch.py | Python | gpl-3.0 | 7,109 | 0.002532 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Luis Alberto Perez Lazaro <luisperlazaro@gmail.com>
# Copyright: (c) 2015, Jakub Jirutka <jakub@jirutka.cz>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENT | ATION = r'''
---
module: patch
author:
- Jakub Jirutka (@jirutka)
- Luis Alberto Perez Lazaro (@luisperlaz)
version_added: '1.9'
description:
- Apply patch files using the GNU patch tool.
short_description: Apply patch files using the GNU patch tool
options:
basedir:
description | :
- Path of a base directory in which the patch file will be applied.
- May be omitted when C(dest) option is specified, otherwise required.
type: path
dest:
description:
- Path of the file on the remote machine to be patched.
- The names of the files to be patched are usually taken from the patch
file, but if there's just one file to be patched it can specified with
this option.
type: path
aliases: [ originalfile ]
src:
description:
- Path of the patch file as accepted by the GNU patch tool. If
C(remote_src) is 'no', the patch source file is looked up from the
module's I(files) directory.
type: path
required: true
aliases: [ patchfile ]
state:
description:
- Whether the patch should be applied or reverted.
type: str
choices: [ absent, present ]
default: present
version_added: "2.6"
remote_src:
description:
- If C(no), it will search for src at originating/master machine, if C(yes) it will
go to the remote/target machine for the C(src).
type: bool
default: no
strip:
description:
- Number that indicates the smallest prefix containing leading slashes
that will be stripped from each file name found in the patch file.
- For more information see the strip parameter of the GNU patch tool.
type: int
default: 0
backup:
version_added: "2.0"
description:
- Passes C(--backup --version-control=numbered) to patch, producing numbered backup copies.
type: bool
default: no
binary:
version_added: "2.0"
description:
- Setting to C(yes) will disable patch's heuristic for transforming CRLF
line endings into LF.
- Line endings of src and dest must match.
- If set to C(no), C(patch) will replace CRLF in C(src) files on POSIX.
type: bool
default: no
notes:
- This module requires GNU I(patch) utility to be installed on the remote host.
'''
EXAMPLES = r'''
- name: Apply patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
- name: Apply patch to multiple files under basedir
patch:
src: /tmp/customize.patch
basedir: /var/www
strip: 1
- name: Revert patch to one file
patch:
src: /tmp/index.html.patch
dest: /var/www/index.html
state: absent
'''
import os
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils._text import to_native
class PatchError(Exception):
pass
def add_dry_run_option(opts):
# Older versions of FreeBSD, OpenBSD and NetBSD support the --check option only.
if get_platform().lower() in ['openbsd', 'netbsd', 'freebsd']:
opts.append('--check')
else:
opts.append('--dry-run')
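# Note: is_already_applied() below relies on a GNU patch idiom: if a dry run
# in the opposite direction succeeds (rc == 0), the tree is already in the
# desired state. For state=present it probes with --reverse; for state=absent
# it probes in the forward direction.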
def is_already_applied(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, state='present'):
opts = ['--quiet', '--forward',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
add_dry_run_option(opts)
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if state == 'present':
opts.append('--reverse')
(rc, _, _) = patch_func(opts)
return rc == 0
def apply_patch(patch_func, patch_file, basedir, dest_file=None, binary=False, strip=0, dry_run=False, backup=False, state='present'):
opts = ['--quiet', '--forward', '--batch', '--reject-file=-',
"--strip=%s" % strip, "--directory='%s'" % basedir,
"--input='%s'" % patch_file]
if dry_run:
add_dry_run_option(opts)
if binary:
opts.append('--binary')
if dest_file:
opts.append("'%s'" % dest_file)
if backup:
opts.append('--backup --version-control=numbered')
if state == 'absent':
opts.append('--reverse')
(rc, out, err) = patch_func(opts)
if rc != 0:
msg = err or out
raise PatchError(msg)
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path', required=True, aliases=['patchfile']),
dest=dict(type='path', aliases=['originalfile']),
basedir=dict(type='path'),
strip=dict(type='int', default=0),
remote_src=dict(type='bool', default=False),
# NB: for 'backup' parameter, semantics is slightly different from standard
# since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
backup=dict(type='bool', default=False),
binary=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
required_one_of=[['dest', 'basedir']],
supports_check_mode=True,
)
# Create type object as namespace for module params
p = type('Params', (), module.params)
if not os.access(p.src, os.R_OK):
module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))
if p.dest and not os.access(p.dest, os.W_OK):
module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))
if p.basedir and not os.path.exists(p.basedir):
module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))
if not p.basedir:
p.basedir = os.path.dirname(p.dest)
patch_bin = module.get_bin_path('patch')
if patch_bin is None:
module.fail_json(msg="patch command not found")
def patch_func(opts):
return module.run_command('%s %s' % (patch_bin, ' '.join(opts)))
# patch need an absolute file name
p.src = os.path.abspath(p.src)
changed = False
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, state=p.state):
try:
apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
dry_run=module.check_mode, backup=p.backup, state=p.state)
changed = True
except PatchError as e:
module.fail_json(msg=to_native(e), exception=format_exc())
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
bolkedebruin/airflow | tests/providers/apache/livy/operators/test_livy.py | Python | apache-2.0 | 6,889 | 0.003048 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import unittest
from unittest.mock import MagicMock, patch
import pytest
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.models.dag import DAG
from airflow.providers.apache.livy.hooks.livy import BatchState, LivyHook
from airflow.providers.apache.livy.operators.livy import LivyOperator
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
mock_livy_client = MagicMock()
BATCH_ID = 100
LOG_RESPONSE = {"total": 3, "log": ['first_line', 'second_line', 'third_line']}
class TestLivyOperator(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
db.merge_conn(
Connection(
conn_id='livyunittest', conn_type='livy', host='localhost:8998', port='8998', schema='http'
)
)
@patch(
'airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs',
return_value=None,
)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state')
def test_poll_for_termination(self, mock_livy, mock_dump_logs):
state_list = 2 * [BatchState.RUNNING] + [BatchState.SUCCESS]
def side_effect(_, retry_args):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop once the terminal state is reached
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(file='sparkapp', polling_interval=1, dag=self.dag, task_id='livy_example')
task._livy_hook = task.get_hook()
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_w | ith(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_with(BATC | H_ID)
assert mock_livy.call_count == 3
@patch(
'airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs',
return_value=None,
)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state')
def test_poll_for_termination_fail(self, mock_livy, mock_dump_logs):
state_list = 2 * [BatchState.RUNNING] + [BatchState.ERROR]
def side_effect(_, retry_args):
if state_list:
return state_list.pop(0)
            # fail if polling does not stop once the terminal state is reached
raise AssertionError()
mock_livy.side_effect = side_effect
task = LivyOperator(file='sparkapp', polling_interval=1, dag=self.dag, task_id='livy_example')
task._livy_hook = task.get_hook()
with pytest.raises(AirflowException):
task.poll_for_termination(BATCH_ID)
mock_livy.assert_called_with(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_with(BATCH_ID)
assert mock_livy.call_count == 3
@patch(
'airflow.providers.apache.livy.operators.livy.LivyHook.dump_batch_logs',
return_value=None,
)
@patch(
'airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state',
return_value=BatchState.SUCCESS,
)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch', return_value=BATCH_ID)
def test_execution(self, mock_post, mock_get, mock_dump_logs):
task = LivyOperator(
livy_conn_id='livyunittest',
file='sparkapp',
polling_interval=1,
dag=self.dag,
task_id='livy_example',
)
task.execute(context={})
call_args = {k: v for k, v in mock_post.call_args[1].items() if v}
assert call_args == {'file': 'sparkapp'}
mock_get.assert_called_once_with(BATCH_ID, retry_args=None)
mock_dump_logs.assert_called_once_with(BATCH_ID)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch')
def test_execution_with_extra_options(self, mock_post):
extra_options = {'check_response': True}
task = LivyOperator(
file='sparkapp', dag=self.dag, task_id='livy_example', extra_options=extra_options
)
task.execute(context={})
assert task.get_hook().extra_options == extra_options
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.delete_batch')
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch', return_value=BATCH_ID)
def test_deletion(self, mock_post, mock_delete):
task = LivyOperator(
livy_conn_id='livyunittest', file='sparkapp', dag=self.dag, task_id='livy_example'
)
task.execute(context={})
task.kill()
mock_delete.assert_called_once_with(BATCH_ID)
def test_injected_hook(self):
def_hook = LivyHook(livy_conn_id='livyunittest')
task = LivyOperator(file='sparkapp', dag=self.dag, task_id='livy_example')
task._livy_hook = def_hook
assert task.get_hook() == def_hook
@patch(
'airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_state',
return_value=BatchState.SUCCESS,
)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.get_batch_logs', return_value=LOG_RESPONSE)
@patch('airflow.providers.apache.livy.operators.livy.LivyHook.post_batch', return_value=BATCH_ID)
def test_log_dump(self, mock_post, mock_get_logs, mock_get):
task = LivyOperator(
livy_conn_id='livyunittest',
file='sparkapp',
dag=self.dag,
task_id='livy_example',
polling_interval=1,
)
with self.assertLogs(task.get_hook().log, level=logging.INFO) as cm:
task.execute(context={})
assert 'INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:first_line' in cm.output
assert 'INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:second_line' in cm.output
assert 'INFO:airflow.providers.apache.livy.hooks.livy.LivyHook:third_line' in cm.output
mock_get.assert_called_once_with(BATCH_ID, retry_args=None)
mock_get_logs.assert_called_once_with(BATCH_ID, 0, 100)
|
bohlian/frappe | frappe/www/qrcode.py | Python | mit | 1,245 | 0.026506 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from urlparse import parse_qs
from frappe.twofactor import get_qr_svg_code
def get_context(context):
context.no_cache = 1
context.qr_code_user,context.qrcode_svg = get_user_svg_from_cache()
def get_query_key():
'''Return query string arg.'''
query_string = frappe.local.request.query_string
query = parse_qs(query_string)
if not 'k' in query.keys():
frappe.throw(_('Not Permitted'),frappe.PermissionError)
query = (query['k'][0]).strip()
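	# allow only strictly alphanumeric keys; anything else is rejected below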
if False in [i.isalpha() or i.isdigit() for i in query]:
frappe.throw(_('Not Permitted'),frappe.PermissionError)
return query
def get_user_svg_from_cache():
'''Get User and SVG code from cache.'''
key = get_query_key()
totp_uri = frappe.cache().get_value("{}_uri".format(key))
user = frappe.cache().get_value("{}_user".format(key))
if not totp_uri or not user:
frappe.throw(_('Page has expired!'),frappe.PermissionError)
if not frappe.db.exists('User',user): |
frappe.throw(_('Not Permitted'), frappe.PermissionError)
user = frapp | e.get_doc('User',user)
svg = get_qr_svg_code(totp_uri)
return (user,svg)
|
XefPatterson/INF8225_Project | Model/queues.py | Python | mit | 3,465 | 0.001154 | import tensorflow as tf
import os
def create_single_queue(bucket_id, filename, batch_size, buckets):
"""
    Return a shuffle-batch op that outputs elements from bucket {bucket_id}
    :param bucket_id: int
    :param filename: str
    :param batch_size: int
    :param buckets: list
    :return: a shuffle-batch op yielding padded (question, answer) pairs
"""
file_name = os.path.dirname(os.path.abspath(__file__))
path_to_save_example = os.path.join(file_name, os.pardir, "Examples")
filename = os.path.join(path_to_save_example, "{}{}.tfrecords".format(filename, bucket_id))
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
# Read a single example
_, serialized_example = reader.read(filename_queue)
# Scalar features
context_features = {
"length_question": tf.FixedLenFeature([], dtype=tf.int64),
"length_answer": tf.FixedLenFeature([], dtype=tf.int64)
}
# Tensor features
sequence_features = {
"question": tf.VarLenFeature(dtype=tf.int64),
"answer": tf.VarLenFeature(dtype=tf.int64)
| }
# Parse a single example
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example, |
context_features=context_features,
sequence_features=sequence_features
)
batch_size = batch_size
capacity = 10 * batch_size
min_after_dequeue = 9 * batch_size
# Basically, pad question with zeros if shorter than buckets[bucket_id][0]
length_question = context_parsed["length_question"]
question = sequence_parsed["question"]
question = tf.sparse_tensor_to_dense(question)
question = tf.reshape(question, [-1])
pad_question = tf.zeros(shape=[buckets[bucket_id][0] - tf.cast(length_question, tf.int32)], dtype=tf.int64)
question = tf.concat([question, pad_question], axis=0)
question.set_shape(buckets[bucket_id][0])
# Basically, pad answer with zeros if shorter than buckets[bucket_id][1]
length_answer = context_parsed["length_answer"]
answer = sequence_parsed["answer"]
answer = tf.sparse_tensor_to_dense(answer)
answer = tf.reshape(answer, [-1])
    pad_answer = tf.zeros(shape=[buckets[bucket_id][1] - tf.cast(length_answer, tf.int32)], dtype=tf.int64)
answer = tf.concat([answer, pad_answer], axis=0)
answer.set_shape(buckets[bucket_id][1])
# Shuffle queue
return tf.train.shuffle_batch([question, answer],
batch_size,
capacity,
min_after_dequeue)
def create_queues_for_bucket(batch_size, filename, buckets):
"""
    For every bucket, create a ShuffleQueue,
    then create a FIFOQueue on top of those queues (used for filtering)
    :param batch_size: int
    :param filename: str
    :param buckets: list
    :return: (all_queues, enqueue_ops), one FIFOQueue and enqueue op per bucket
"""
shuffle_queues = []
for bucket_id in range(len(buckets)):
shuffle_queues.append(create_single_queue(bucket_id, filename, batch_size, buckets))
capacity = 30 * batch_size
    # For every bucket, create a queue which returns batch_size examples
    # of that bucket
all_queues, enqueue_ops = [], []
for bucket_id in range(len(buckets)):
queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.int64, tf.int64])
all_queues.append(queue)
enqueue_op = queue.enqueue(shuffle_queues[bucket_id])
enqueue_ops.append(enqueue_op)
return all_queues, enqueue_ops
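# A minimal usage sketch (not part of the original module); bucket sizes,
# the file prefix and the TF1 queue-runner wiring below are illustrative:
#
#     buckets = [(10, 15), (20, 25)]
#     queues, enqueue_ops = create_queues_for_bucket(32, "train", buckets)
#     qr = tf.train.QueueRunner(queues[0], [enqueue_ops[0]])
#     tf.train.add_queue_runner(qr)
#     question_batch, answer_batch = queues[0].dequeue()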
|
saltstack/salt-pylint | saltpylint/checkers.py | Python | apache-2.0 | 646 | 0.001548 | # -*- coding: utf-8 -*-
'''
saltpylint.checkers
~~~~~~~~~~~~~~~~~~~~
Works around older astroid versions
'''
# Import python libs
from __future__ import absolute_import
# Import pylint libs
import astroid
from pylint.checkers import BaseChecker as _Ba | seChecker
# Imported to avoid needing a separate import from pylint.checkers
from pylint.checkers import utils
class | BaseChecker(_BaseChecker):
def __init__(self, *args, **kwargs):
super(BaseChecker, self).__init__(*args, **kwargs)
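        # Older astroid has no Call node (it uses CallFunc), and pylint then
        # dispatches to visit_callfunc; alias our visit_call under that name.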
if hasattr(self, 'visit_call') and not hasattr(astroid, 'Call'):
setattr(self, 'visit_callfunc', self.visit_call)
|
skyoo/jumpserver | apps/perms/serializers/asset/permission.py | Python | gpl-2.0 | 2,536 | 0.000394 | # -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from perms.models import AssetPermission, Action
__all__ = [
'AssetPermissionSerializer',
'ActionsField',
]
class ActionsField(serializers.MultipleChoiceField):
def __init__(self, *args, **kwargs):
kwargs['choices'] = Action.CHOICES
super().__init__(*args, **kwargs)
def to_representation(self, value):
return Action.value_to_choices(value)
def to_internal_value(self, data):
if data is None:
return data
return Action.choices_to_value(data)
class ActionsDisplayField(ActionsField):
def to_representation(self, value):
values = super().to_representation(value)
choices = dict(Action.CHOICES)
return [choices.get(i) for i in values]
class AssetPermissionSerializer(BulkOrgResourceModelSerializer):
actions = ActionsField(required=False, allow_null=True)
is_valid = serializers.BooleanField(read_only=True)
is_expired = serializers.BooleanField(read_only=True)
class Meta:
model = AssetPermission
mini_fields = ['id', 'name']
small_fields = mini_fields + [
'is_active', 'is_expired', 'is_valid', 'actions',
'created_by', 'date_created', 'date_expir | ed',
'date_start', 'comment'
]
m2m_fields = [
'users', 'user_groups', 'assets', 'nodes', 'system_users',
'users_amount', 'user_groups_amount', 'assets_amount',
'nodes_amount', 'system_users_amount',
]
fields = small_fields + m2m_fields
read_only_fields = ['created_by', 'date_created']
extra_kwargs = {
'is_expired': {'label': _('Is expired')},
'is_valid': {'label': _('I | s valid')},
'actions': {'label': _('Actions')},
'users_amount': {'label': _('Users amount')},
'user_groups_amount': {'label': _('User groups amount')},
'assets_amount': {'label': _('Assets amount')},
'nodes_amount': {'label': _('Nodes amount')},
'system_users_amount': {'label': _('System users amount')},
}
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related('users', 'user_groups', 'assets', 'nodes', 'system_users')
return queryset
|
endlessm/chromium-browser | third_party/chromite/third_party/infra_libs/test/utils_test.py | Python | bsd-3-clause | 1,350 | 0.006667 | # -*- encoding: utf-8 -*-
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import infra_libs
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
class _UtilTestException(Exception):
"""Exception used inside tests."""
class TemporaryDirectoryTest(unittest.TestCase):
def test_tempdir_no_error(self):
with infra_libs.temporary_directory() as tempdir:
self.assertTrue(os.path.isdir(tempdir))
# This should work.
with open(os.path.join(tempdir, 'test_tempdir_no_error.txt'), 'w') as f:
f.write('nonsensical content')
# And everything should have been cleaned up afterward
self.assertFalse(os.path.isdir(tempdir))
def test_tempdir_with_exception(self):
with self.assertRaises(_UtilTestException):
with infra_libs.temporary | _directory() as tempdir:
self.assertTrue(os.path.isdir(tempdir))
# Create | a non-empty file to check that tempdir deletion works.
with open(os.path.join(tempdir, 'test_tempdir_no_error.txt'), 'w') as f:
f.write('nonsensical content')
raise _UtilTestException()
# And everything should have been cleaned up afterward
self.assertFalse(os.path.isdir(tempdir))
|
TomBaxter/waterbutler | tasks.py | Python | apache-2.0 | 3,064 | 0.002937 | import os
from invoke import task
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def monkey_patch(ctx):
# Force an older cacert.pem from certifi v2015.4.28, prevents an ssl failure w/ identity.api.rackspacecloud.com.
#
# SubjectAltNameWarning: Certificate for identity.api.rackspacecloud.com has no `subjectAltName`, falling
# back to check for a `commonName` for now. This feature is being removed by major browsers and deprecated by
# RFC 2818. (See https://github.com/shazow/urllib3/issues/497 for details.)
# SubjectAltNameWarning
import ssl
import certifi
_create_default_context = ssl.create_default_context
def create_default_context(purpose=ssl.Purpose.SERVER_AUTH, *, cafile=None, capath=None, cadata=None):
if cafile is None:
cafile = certifi.where()
return _create_default_context(purpose=purpose, cafile=cafile, capath=capath, cadata=cadata)
ssl.create_default_context = create_default_context
@task
def wheelhouse(ctx, develop=False, pty=True):
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
ctx.run(cmd, pty=pty)
@task
def install(ctx, develop=False, pty=True):
ctx.run('python setup.py develop')
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip install --upgrade -r {}'.format(req_ | file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
ctx.run(cmd, pty=pty)
@task
def flake(ctx):
"""
Run style and syntax checker. Follows options defined in setup.cfg
"""
ctx.run('flake8 .', pty=True)
@task
def mypy(ctx):
"""
Check python types using mypy (additional level of linting). | Follows options defined in setup.cfg
"""
ctx.run('mypy waterbutler/', pty=True)
@task
def test(ctx, verbose=False, types=False):
flake(ctx)
if types:
mypy(ctx)
cmd = 'py.test --cov-report term-missing --cov waterbutler tests'
if verbose:
cmd += ' -v'
ctx.run(cmd, pty=True)
@task
def celery(ctx, loglevel='INFO', hostname='%h'):
monkey_patch(ctx)
from waterbutler.tasks.app import app
command = ['worker']
if loglevel:
command.extend(['--loglevel', loglevel])
if hostname:
command.extend(['--hostname', hostname])
app.worker_main(command)
@task
def rabbitmq(ctx):
ctx.run('rabbitmq-server', pty=True)
@task
def server(ctx):
monkey_patch(ctx)
if os.environ.get('REMOTE_DEBUG', None):
import pydevd
# e.g. '127.0.0.1:5678'
remote_parts = os.environ.get('REMOTE_DEBUG').split(':')
pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)
from waterbutler.server.app import serve
serve()
@task
def clean(ctx, verbose=False):
cmd = 'find . -name "*.pyc" -delete'
if verbose:
print(cmd)
ctx.run(cmd, pty=True)
|
silky/PeachPy | examples/nmake/transpose4x4-opt.py | Python | bsd-2-clause | 1,848 | 0.000541 | # This file is part of Peach-Py package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
from peachpy.x86_64 import *
from peachpy import *
matrix = Argument(ptr(float_))
with Function("transpose4x4_opt", (matrix,)):
reg_matrix = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_matrix, matrix)
xmm_rows = [XMMRegister() for _ in range(4)]
for i, xmm_row in enumerate(xmm_rows):
MOVUPS(xmm_row, [reg_matrix + i * XMMRegister.size])
xmm_temps = [XMMRegister() for _ in range(2)]
# xmm_temps[0] = ( m00, m01, m02, m03 )
MOVAPS(xmm_temps[0], xmm_rows[0])
# xmm_temps[1] = ( m20, m21, m22, m23 )
MOVAPS(xmm_temps[1], xmm_rows[2])
# xmm_rows[0] = ( m00, m10, m01, m11 )
UNPCKLPS(xmm_rows[ | 0], xmm_rows[1])
# xmm_rows[2] = ( m20, m30, m21, m31 )
UNPCKLPS(xmm_rows[2], xmm_rows[3])
# xmm_rows[1] = ( m02, m12, m03, m13 )
UNPCKHPS(xmm_temps[0], xmm_rows[1])
xmm_rows[1] = xmm_temps[0]
# xmm_rows[3] = ( m22, m32, m23, m33 )
UNPCKHPS(xmm_temps[1], xmm_rows[3])
xmm_rows[3] = xmm_temps[1]
xmm_temps = [XMMRegister() for _ in range(2)]
# xmm_temps[0] = ( m00, m10, | m01, m11 )
MOVAPS(xmm_temps[0], xmm_rows[0])
# xmm_temps[1] = ( m02, m12, m03, m13 )
MOVAPS(xmm_temps[1], xmm_rows[1])
# xmm_rows[0] = ( m00, m10, m20, m30 )
MOVLHPS(xmm_rows[0], xmm_rows[2])
MOVUPS([reg_matrix], xmm_rows[0])
# xmm_rows[2] = ( m01, m11, m21, m31 )
MOVHLPS(xmm_rows[2], xmm_temps[0])
MOVUPS([reg_matrix + 16], xmm_rows[2])
# xmm_rows[1] = ( m02, m12, m22, m32 )
MOVLHPS(xmm_rows[1], xmm_rows[3])
MOVUPS([reg_matrix + 32], xmm_rows[1])
# xmm_rows[3] = ( m03, m13, m23, m33 )
MOVHLPS(xmm_rows[3], xmm_temps[1])
MOVUPS([reg_matrix + 48], xmm_rows[3])
RETURN()
|
BradburyLab/show_tv | show_tv/app/models/dvr_reader.py | Python | gpl-3.0 | 3,838 | 0.003672 | # coding: utf-8
from .dvr_base import DVRBase
import api
import struct
from io import BytesIO
from tornado import gen
@gen.engine
def call_dvr_cmd(dvr_reader, func, *args, callback, **kwargs):
stream = yield gen.Task(api.connect, dvr_reader.host, dvr_reader.port)
if stream:
def on_result(data):
callback((True, data))
stream.close()
func(*args, stream=stream, callback=on_result, **kwargs)
else:
dvr_reader.l.debug('[DVRReader] failed to connect')
callback((False, None))
def pack_read_cmd(cmd, r_t_p, startstamp, tail_fmt, *tail_args):
return api.pack_rtp_cmd(cmd, r_t_p, "Q" + tail_fmt,
        # (4) (Q) Start time
startstamp,
*tail_args
)
class DVRReader(DVRBase):
commands = {
'load': 0x02,
'range': 0x04,
}
def __init__(self, cfg, host='127.0.0.1', port=7451):
super().__init__(cfg, host, port)
@gen.engine
def request_range(self, r_t_p, startstamp, duration, stream, callback):
        '''Request the list of recorded chunks for *r_t_p* between
        *startstamp* and *startstamp* + *duration*.'''
self.l.debug('[DVRReader] range start >>>>>>>>>>>>>>>')
if isinstance(startstamp, str):
startstamp = int(startstamp)
if isinstance(duration, str):
duration = int(duration)
endstamp = startstamp + duration
self.l.debug('[DVRReader] => asset = {0}'.format(r_t_p))
self.l.debug('[DVRReader] => start = {0}'.format(startstamp))
self.l.debug('[DVRReader] => end = {0}'.format(endstamp))
pack = pack_read_cmd(self.commands['range'], r_t_p, startstamp, "Q",
            # (5) (Q) End time
endstamp,
)
yield gen.Task(stream.write, pack)
data = yield gen.Task(stream.read_bytes, 8, streaming_callback=None)
length = struct.unpack('=Q', data)[0]
self.l.debug('[DVRReader]')
self.l.debug('[DVRReader] <= length = {0}'.format(length))
chunks_data = yield gen.Task(stream.read_bytes, length, streaming_callback=None)
self.l.debug('[DVRReader] <= chunks_data_len = {0}'.format(len(chunks_data)))
io = BytesIO(chunks_data)
playlist = []
while True:
chunk_data = io.read(16)
if len(chunk_data) != 16:
break
self.l.debug('[DVRReader]')
(
startstamp,
duration,
) = struct.unpack('=QQ', chunk_data)
self.l.debug('[DVRReader] <= startstamp = {0}'.format(startstamp))
self.l.debug('[DVRReader] <= duration = {0}'.format(duration))
playlist.append({
'startstamp': startstamp,
'duration': duration,
})
self.l.debug('[DVRReader] range finish <<<<<<<<<<<<<<<\n')
callback(playlist)
@gen.engine
de | f load(self, r_t_p, startstamp, stream, callback):
        '''Load the payload of the recorded chunk identified by *r_t_p*
        and *startstamp*.'''
self.l.debug('[DVRReader] load start >>>>>>>>>>>>>>>')
if isinstance(startstamp, str):
startstamp = int(startstamp)
self.l.debug('[DVRReader] => asset = {0}'.format(r_t_p))
self.l.debug('[DVRReader] => startstamp = {0}'. | format(startstamp))
pack = pack_read_cmd(self.commands['load'], r_t_p, startstamp, '')
yield gen.Task(stream.write, pack)
data = yield gen.Task(stream.read_bytes, 8, streaming_callback=None)
length = struct.unpack('=Q', data)[0]
self.l.debug('[DVRReader]')
self.l.debug('[DVRReader] <= length = {0}'.format(length))
payload = yield gen.Task(stream.read_bytes, length, streaming_callback=None)
self.l.debug('[DVRReader] <= payloadlen = {0}'.format(len(payload)))
self.l.debug('[DVRReader] load finish <<<<<<<<<<<<<<<\n')
callback(payload)
|
JetChars/vim | vim/bundle/python-mode/pymode/libs3/rope/base/pycore.py | Python | apache-2.0 | 15,520 | 0.000451 | import bisect
import difflib
import sys
import warnings
import rope.base.oi.doa
import rope.base.oi.objectinfo
import rope.base.oi.soa
from rope.base import ast, exceptions, taskhandle, utils, stdmods
from rope.base.exceptions import ModuleNotFoundError
from rope.base.pyobjectsdef import PyModule, PyPackage, PyClass
import rope.base.resources
import rope.base.resourceobserver
from rope.base import builtins
class PyCore(object):
def __init__(self, project):
self.project = project
self._init_resource_observer()
self.cache_observers = []
self.module_cache = _ModuleCache(self)
self.extension_cache = _ExtensionCache(self)
self.object_info = rope.base.oi.objectinfo.ObjectInfoManager(project)
self._init_python_files()
self._init_automatic_soa()
self._init_source_folders()
def _init_python_files(self):
self.python_matcher = None
patterns = self.project.prefs.get('python_files', None)
if patterns is not None:
self.python_matcher = rope.base.resources._ResourceMatcher()
self.python_matcher.set_patterns(patterns)
def _init_resource_observer(self):
callback = self._invalidate_resource_cache
observer = rope.base.resourceobserver.ResourceObserver(
changed=callback, moved=callback, removed=callback)
self.observer = rope.base.resourceobserver.FilteredResourceObserver(observer)
self.project.add_observer(self.observer)
def _init_source_folders(self):
self._custom_source_folders = []
for path in self.project.prefs.get('source_folders', []):
folder = self.project.get_resource(path)
self._custom_source_folders.append(folder)
def _init_automatic_soa(self):
if not self.automatic_soa:
return
callback = self._file_changed_for_soa
observer = rope.base.resourceobserver.ResourceObserver(
changed=callback, moved=callback, removed=callback)
self.project.add_observer(observer)
@property
def automatic_soa(self):
auto_soa = self.project.prefs.get('automatic_soi', None)
return self.project.prefs.get('automatic_soa', auto_soa)
def _file_changed_for_soa(self, resource, new_resource=None):
old_contents = self.project.history.\
contents_before_current_change(resource)
if old_contents is not None:
perform_soa_on_changed_scopes(self.project, resource, old_contents)
def is_python_file(self, resource):
if resource.is_folder():
return False
if self.python_matcher is None:
return resource.name.endswith('.py')
return self.python_matcher.does_match(resource)
def get_module(self, name, folder=None):
"""Returns a `PyObject` if the module was found."""
# check if this is a builtin module
pymod = self._builtin_module(name)
if pymod is not None:
return pymod
module = self.find_module(name, folder)
if module is None:
raise ModuleNotFoundError('Module %s not found' % name)
return self.resource_to_pyobject(module)
def _builtin_submodules(self, modname):
result = {}
for extension in self.extension_modules:
if extension.startswith(modname + '.'):
name = extension[len(modname) + 1:]
if '.' not in name:
result[name] = self._builtin_module(extension)
return result
def _builtin_module(self, name):
return self.extension_cache.get_pymodule(name)
def get_relative_module(self, name, folder, level):
module = self.find_relative_module(name, folder, level)
if module is None:
raise ModuleNotFoundError('Module %s not found' % name)
return self.resource_to_pyobject(module)
def get_string_module(self, code, resource=None, force_errors=False):
"""Returns a `PyObject` object for the given code
If `force_errors` is `True`, `exceptions.ModuleSyntaxError` is
raised if module has syntax errors. This overrides
``ignore_syntax_errors`` project config | .
"""
return PyModule(self, code, resource, force_errors=force_errors)
def get_string_scope(self, code, resource=None):
"""Returns a `Scope` object for the given code"""
return self.get_string_module(code, resource).get_scope()
| def _invalidate_resource_cache(self, resource, new_resource=None):
for observer in self.cache_observers:
observer(resource)
def _find_module_in_folder(self, folder, modname):
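        # Resolve a dotted name like "pkg.sub.mod" relative to *folder*,
        # returning either the package folder pkg/sub/mod or pkg/sub/mod.py.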
module = folder
packages = modname.split('.')
for pkg in packages[:-1]:
if module.is_folder() and module.has_child(pkg):
module = module.get_child(pkg)
else:
return None
if module.is_folder():
if module.has_child(packages[-1]) and \
module.get_child(packages[-1]).is_folder():
return module.get_child(packages[-1])
elif module.has_child(packages[-1] + '.py') and \
not module.get_child(packages[-1] + '.py').is_folder():
return module.get_child(packages[-1] + '.py')
def get_python_path_folders(self):
import rope.base.project
result = []
for src in self.project.prefs.get('python_path', []) + sys.path:
try:
src_folder = rope.base.project.get_no_project().get_resource(src)
result.append(src_folder)
except rope.base.exceptions.ResourceNotFoundError:
pass
return result
def find_module(self, modname, folder=None):
"""Returns a resource corresponding to the given module
returns None if it can not be found
"""
return self._find_module(modname, folder)
def find_relative_module(self, modname, folder, level):
for i in range(level - 1):
folder = folder.parent
if modname == '':
return folder
else:
return self._find_module_in_folder(folder, modname)
def _find_module(self, modname, folder=None):
"""Return `modname` module resource"""
for src in self.get_source_folders():
module = self._find_module_in_folder(src, modname)
if module is not None:
return module
for src in self.get_python_path_folders():
module = self._find_module_in_folder(src, modname)
if module is not None:
return module
if folder is not None:
module = self._find_module_in_folder(folder, modname)
if module is not None:
return module
return None
# INFO: It was decided not to cache source folders, since:
# - Does not take much time when the root folder contains
# packages, that is most of the time
# - We need a separate resource observer; `self.observer`
# does not get notified about module and folder creations
def get_source_folders(self):
"""Returns project source folders"""
if self.project.root is None:
return []
result = list(self._custom_source_folders)
result.extend(self._find_source_folders(self.project.root))
return result
def resource_to_pyobject(self, resource, force_errors=False):
return self.module_cache.get_pymodule(resource, force_errors)
def get_python_files(self):
"""Returns all python files available in the project"""
return [resource for resource in self.project.get_files()
if self.is_python_file(resource)]
def _is_package(self, folder):
if folder.has_child('__init__.py') and \
not folder.get_child('__init__.py').is_folder():
return True
else:
return False
def _find_source_folders(self, folder):
for resource in folder.get_folders():
if self._is_package(resource):
return [folder]
|
friedrichromstedt/moviemaker3 | moviemaker3/stacks/weighted.py | Python | mit | 1,580 | 0.00443 | from fframework import asfunction
from moviemaker3.stacks.stack import Stack
class WeightedStack(Stack):
"""Elements in the WeightedStack should return (*weight*, *layer*);
*layer* and *weight* are extracted by indexing (tuple assignment). You
might use ``fframework.compound()`` to generate tuple Functions."""
def __init__(self, zero_layer=None, zero_weight=None):
"""*zero_layer* is the 0 to use in summing up the layers (the start
value), it defaults to 0.
*zero_weight* is the 0 to use in summing up the weights, it default to
0, too."""
if zero_layer is None:
zero_layer = 0
if zero_weight is None:
zero_weight = 0
Stack.__init__(self)
self.zero_layer = asfunction(zero_layer)
self.zero_weight = asfunction(zero_weight)
def __call__(self, ps):
"""Blends the layers | together. Note that if all weights are zero,
the result is undefined. The start value for summing up the layers
| is *self.zero_layer*. The start value for summing up the weights is
*self.zero_weight*. Both are evaluated with *ps*."""
sumlayer = self.zero_layer(ps)
weightsum = self.zero_weight(ps)
for layer in self.elements:
(weight, layer) = layer(ps)
# We don't use augmented arithmetics because we might want to
# employ broadcasting.
sumlayer = sumlayer + weight * layer
weightsum = weight + weightsum
return sumlayer / weightsum
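# A hedged usage sketch (not part of the original module); it assumes Stack
# exposes its layers through the `elements` list iterated above:
#
#     stack = WeightedStack()
#     stack.elements.append(lambda ps: (1.0, 10.0))  # (weight, layer)
#     stack.elements.append(lambda ps: (3.0, 20.0))
#     stack(None)  # -> (1.0*10.0 + 3.0*20.0) / (1.0 + 3.0) = 17.5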
|
aknackiron/testdroid-samples | appium/sample-scripts/python/testdroid_ios.py | Python | apache-2.0 | 5,790 | 0.0038 | ##
## For help on setting up your machine and configuring this TestScript go to
## http://docs.bitbar.com/testing/appium/
##
import os
import time
import unittest
from time import sleep
from appium import webdriver
from device_finder import DeviceFinder
def log(msg):
print (time.strftime("%H:%M:%S") + ": " + msg)
class TestdroidIOS(unittest.TestCase):
"""
Take screenshot and store files to defined location, with numbering prefix
:Args:
- name - files are stored as #_name
"""
def screenshot(self, name):
screenshot_name = str(self.screenshot_count) + "_" + name + ".png"
log("Taking screenshot: " + screenshot_name)
self.driver.save_screenshot(self.screenshot_dir + "/" + screenshot_name)
self.screenshot_count += 1
def setUp(self):
##
## IMPORTANT: Set the following parameters.
##
testdroid_url = os.environ.get('TESTDROID_URL') or "https://cloud.bitbar.com"
appium_url = os.environ.get('TESTDROID_APPIUM_URL') or 'https://appium.bitbar.com/wd/hub'
testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
testdroid_project_name = os.environ.get('TESTDROID_PROJECT') or "iOS sample project"
testdroid_testrun_name = os.environ.get('TESTDROID_TESTRUN') or "My testrun"
testdroid_app = os.environ.get('TESTDROID_APP') or ""
testdroid_bundle_id = os.environ.get('TESTDROID_BUNDLE_ID') or "com.bitbar.testdroid.BitbarIOSSample"
new_command_timeout = os.environ.get('TESTDROID_CMD_TIMEOUT') or '60'
testdroid_test_timeout = os.environ.get('TESTDROID_TEST_TIMEOUT') or '600'
testdroid_find_device = os.environ.get('TESTDROID_FINDDEVICE') or "true"
automation_name = os.environ.get('TESTDROID_AUTOMATION_NAME') or "XCUITest"
self.screenshot_dir = os.environ.get('TESTDROID_SCREENSHOTS') or os.getcwd() + "/screenshots"
log("Will save screenshots at: " + self.screenshot_dir)
self.screenshot_count = 1
# Options to select device
# 1) Set environment variable TESTDROID_DEVICE
# 2) Set device name to this python script
# 3) Do not set #1 and #2 and let DeviceFinder to find free device for you
testdroid_device = os.environ.get('TESTDROID_DEVICE') or ""
deviceFinder = DeviceFinder(url=testdroid_url)
if testdroid_device == "":
# Loop will not exit until free device is found
while testdroid_device == "":
testdroid_device = deviceFinder.available_ios_device()
print "Starting Appium test using device '%s'" % testdroid_device
desired_capabilities_cloud = {}
desired_capabilities_ | cloud['testdroid_apiKey'] = testdroid_apiKey
desired_capabilities_cloud['testdroid_target'] = 'ios'
desired | _capabilities_cloud['testdroid_project'] = testdroid_project_name
desired_capabilities_cloud['testdroid_testrun'] = testdroid_testrun_name
desired_capabilities_cloud['testdroid_device'] = testdroid_device
desired_capabilities_cloud['testdroid_app'] = testdroid_app
desired_capabilities_cloud['platformName'] = 'iOS'
desired_capabilities_cloud['deviceName'] = 'iPhone device'
desired_capabilities_cloud['newCommandTimeout'] = new_command_timeout
desired_capabilities_cloud['testdroid_testTimeout'] = testdroid_test_timeout
desired_capabilities_cloud['testdroid_findDevice'] = testdroid_find_device
desired_capabilities_cloud['automationName'] = automation_name
desired_capabilities_cloud['app'] = testdroid_bundle_id
# set up webdriver
log("WebDriver request initiated. Waiting for response, this typically takes 2-3 mins")
self.driver = webdriver.Remote(command_executor=appium_url, desired_capabilities=desired_capabilities_cloud)
log("WebDriver response received")
def tearDown(self):
log("Quitting")
self.driver.quit()
def testSample(self):
# view1
log("view1: Finding buttons")
buttons = self.driver.find_elements_by_class_name('UIAButton')
log("view1: Clicking button [0] - RadioButton 1")
buttons[0].click()
log("view1: Typing in textfield[0]: Testdroid user")
elem = self.driver.find_element_by_class_name('UIATextField')
elem.clear()
elem.send_keys('Testdroid user')
log("view1: Taking screenshot screenshot1.png")
self.screenshot("screenshot1")
log("view1: Hiding Keyboard")
self.driver.find_element_by_xpath("//*[contains(@name, 'Return')]").click()
log("view1: Taking screenshot screenshot2.png")
self.screenshot("screenshot2")
log("view1: Clicking button[6] - OK Button")
buttons[6].click()
log("view2: Taking screenshot screenshot3.png")
self.screenshot("screenshot3")
# view2
log("view2: Finding buttons")
buttons = self.driver.find_elements_by_class_name('UIAButton')
log("view2: Clicking button[0] - Back/OK button")
buttons[0].click()
# view1
log("view1: Finding buttons")
buttons = self.driver.find_elements_by_class_name('UIAButton')
log("view1: Clicking button[2] - RadioButton 2")
buttons[2].click()
log("view1: Clicking button[6] - OK Button")
buttons[6].click()
log("view1: Taking screenshot screenshot4.png")
self.screenshot("screenshot4")
log("view1: Sleeping 3 before quitting webdriver.")
sleep(3)
def initialize():
return TestdroidIOS
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidIOS)
unittest.TextTestRunner(verbosity=2).run(suite)
|
Radagast-red/golem | tests/golem/network/p2p/test_node.py | Python | gpl-3.0 | 1,163 | 0 | import unittest
from golem.network.p2p.node import Node
def is_ip_address(address):
"""
    Check if @address is a correct IP address
    :param address: Address to be checked
    :return: True if correct, False otherwise
"""
from ipaddress import ip_address, AddressValueError
try:
# will raise error in case of incorrect address
ip_address(unicode(ad | dress))
return True
except (Value | Error, AddressValueError):
return False
class TestNode(unittest.TestCase):
def test_str(self):
n = Node(node_name="Blabla", key="ABC")
self.assertNotIn("at", str(n))
self.assertNotIn("at", "{}".format(n))
self.assertIn("Blabla", str(n))
self.assertIn("Blabla", "{}".format(n))
self.assertIn("ABC", str(n))
self.assertIn("ABC", "{}".format(n))
def test_collect_network_info(self):
""" Test configuring Node object """
node = Node()
node.collect_network_info()
assert is_ip_address(node.pub_addr)
assert is_ip_address(node.prv_addr)
for address in node.prv_addresses:
assert is_ip_address(address)
|
LandRegistry/drv-flask-based-prototype | service/api_client.py | Python | mit | 898 | 0.001114 | import math
from service.fake_api_results import ALL_TITLES, OFFICIAL_COPY_RESULT, SELECTED_FULL_RESULTS
SEARCH_RESULTS_PER_PAGE = 20
def get_title(title_number):
return SELECTED_FULL_RESULTS.get(title_number)
def _get_titles(page_number):
nof_results = len(ALL_TITLES)
number_pages = math.ceil(nof_results / SEARCH_RESULTS_PER_PAGE)
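    # e.g. 45 results at 20 per page -> ceil(45 / 20) = 3 pages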
start_index = page_number * SEARCH_RESULTS_PER_PAGE
end_index = start_index + SEARCH_RESULTS_PER_PAGE
return {
'number_pages': number_pages,
'number_results': nof_results,
| 'page_number': page_number,
'titles': ALL_TITLES[start_index:end_index],
}
def get_titles_by_postcode(postcode, page_number):
return _get_titles(page_number)
def get_titles_by_address(address, page_number):
return _get_titles(page_number)
def get_official_copy_data(tit | le_number):
return OFFICIAL_COPY_RESULT
|
eagleamon/home-assistant | homeassistant/components/media_player/__init__.py | Python | apache-2.0 | 28,502 | 0.000035 | """
Component to interface with various media players.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player/
"""
import asyncio
from datetime import timedelta
import functools as ft
import hashlib
import logging
import os
from random import SystemRandom
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
from homeassistant.const import (
STATE_OFF, STATE_UNKNOWN, STATE_PLAYING, STATE_IDLE,
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_SET,
SERVICE_VOLUME_MUTE, SERVICE_TOGGLE, SERVICE_MEDIA_STOP,
SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK)
_LOGGER = logging.getLogger(__name__)
_RND = SystemRandom()
DOMAIN = 'media_player'
DEPENDENCIES = ['http']
SCAN_INTERVAL = timedelta(seconds=10)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ENTITY_IMAGE_URL = '/api/media_player_proxy/{0}?token={1}&cache={2}'
ATTR_CACHE_IMAGES = 'images'
ATTR_CACHE_URLS = 'urls'
ATTR_CACHE_MAXSIZE = 'maxsize'
ENTITY_IMAGE_CACHE = {
ATTR_CACHE_IMAGES: {},
ATTR_CACHE_URLS: [],
ATTR_CACHE_MAXSIZE: 16
}
CONTENT_TYPE_HEADER = 'Content-Type'
SERVICE_PLAY_MEDIA = 'play_media'
SERVICE_SELECT_SOURCE = 'select_source'
SERVICE_CLEAR_PLAYLIST = 'clear_playlist'
ATTR_MEDIA_VOLUME_LEVEL = 'volume_level'
ATTR_MEDIA_VOLUME_MUTED = 'is_volume_muted'
ATTR_MEDIA_SEEK_POSITION = 'seek_position'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_CONTENT_TYPE = 'media_content_type'
ATTR_MEDIA_DURATION = 'media_duration'
ATTR_MEDIA_POSITION = 'media_position'
ATTR_MEDIA_POSITION_UPDATED_AT = 'media_position_updated_at'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM_NAME = 'media_album_name'
ATTR_MEDIA_ALBUM_ARTIST = 'media_album_artist'
ATTR_MEDIA_TRACK = 'media_track'
ATTR_MEDIA_SERIES_TITLE = 'media_series_title'
ATTR_MEDIA_SEASON = 'media_season'
ATTR_MEDIA_EPISODE = 'media_episode'
ATTR_MEDIA_CHANNEL = 'media_channel'
ATTR_MEDIA_PLAYLIST = 'media_playlist'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_SUPPORTED_MEDIA_COMMANDS = 'supported_media_commands'
ATTR_INPUT_SOURCE = 'source'
ATTR_INPUT_SOURCE_LIST = 'source_list'
ATTR_MEDIA_ENQUEUE = 'enqueue'
MEDIA_TYPE_MUSIC = 'music'
MEDIA_TYPE_TVSHOW = 'tvshow'
MEDIA_TYPE_VIDEO = 'movie'
MEDIA_TYPE_EPISODE = 'episode'
MEDIA_TYPE_CHANNEL = 'channel'
MEDIA_TYPE_PLAYLIST = 'playlist'
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
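# The SUPPORT_* constants form a bitmask: a player advertises its
# capabilities by OR-ing the relevant flags together.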
# Service call validation schemas
MEDIA_PLAYER_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
MEDIA_PLAYER_SET_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float,
})
MEDIA_PLAYER_MUTE_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean,
})
MEDIA_PLAYER_MEDIA_SEEK_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_SEEK_POSITION):
vol.All(vol.Coerce(float), vol.Range(min=0)),
})
MEDIA_PLAYER_SELECT_SOURCE_SCHEMA = ME | DIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_INPUT_SOURCE): cv.string,
})
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
})
SERVICE_TO_METHOD = {
SERVICE_TURN_ON: {'method': 'async_turn_on'},
SERVICE_TURN_OFF: {'method': 'async_turn_off'} | ,
SERVICE_TOGGLE: {'method': 'async_toggle'},
SERVICE_VOLUME_UP: {'method': 'async_volume_up'},
SERVICE_VOLUME_DOWN: {'method': 'async_volume_down'},
SERVICE_MEDIA_PLAY_PAUSE: {'method': 'async_media_play_pause'},
SERVICE_MEDIA_PLAY: {'method': 'async_media_play'},
SERVICE_MEDIA_PAUSE: {'method': 'async_media_pause'},
SERVICE_MEDIA_STOP: {'method': 'async_media_stop'},
SERVICE_MEDIA_NEXT_TRACK: {'method': 'async_media_next_track'},
SERVICE_MEDIA_PREVIOUS_TRACK: {'method': 'async_media_previous_track'},
SERVICE_CLEAR_PLAYLIST: {'method': 'async_clear_playlist'},
SERVICE_VOLUME_SET: {
'method': 'async_set_volume_level',
'schema': MEDIA_PLAYER_SET_VOLUME_SCHEMA},
SERVICE_VOLUME_MUTE: {
'method': 'async_mute_volume',
'schema': MEDIA_PLAYER_MUTE_VOLUME_SCHEMA},
SERVICE_MEDIA_SEEK: {
'method': 'async_media_seek',
'schema': MEDIA_PLAYER_MEDIA_SEEK_SCHEMA},
SERVICE_SELECT_SOURCE: {
'method': 'async_select_source',
'schema': MEDIA_PLAYER_SELECT_SOURCE_SCHEMA},
SERVICE_PLAY_MEDIA: {
'method': 'async_play_media',
'schema': MEDIA_PLAYER_PLAY_MEDIA_SCHEMA},
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_SUPPORTED_MEDIA_COMMANDS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
]
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
Check all media player if no entity_id specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(not hass.states.is_state(entity_id, STATE_OFF)
for entity_id in entity_ids)
def turn_on(hass, entity_id=None):
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id=None):
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def toggle(hass, entity_id=None):
"""Toggle specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
def volume_up(hass, entity_id=None):
"""Send the media player the command for volume up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)
def volume_down(hass, entity_id=None):
"""Send the media player the command for volume down."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)
def mute_volume(hass, mute, entity_id=None):
"""Send the media player the command for muting the volume."""
data = {ATTR_MEDIA_VOLUME_MUTED: mute}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE, data)
def set_volume_level(hass, volume, entity_id=None):
"""Send the media player the command for setting the volume."""
data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_SET, data)
def media_play_pause(hass, entity_id=None):
"""Send the |
janpipek/boadata | boadata/commands/boaview.py | Python | mit | 911 | 0.001098 | #!/usr/bin/env python3
import sys
import click
from boadata import __version__
from boadata.cli import try_load, try_apply_sql, qt_app
@click.command()
@click.version_option(__version__)
@click.argument("uri")
@click.option("-s", "--sql", required=False, help="SQL to run on the object.")
@click.option("-t", "--type", default=None, help="What type is the object.")
@click.option("-p", "--parameter", help="Additional parameters for loader, specified as key=value", multiple=True)
def run_app(uri, type, parameter, **kwargs):
kwargs = {key: value for key, value in kwargs.items() if va | lue is not None}
do = try_load(uri, type, parameters=param | eter)
do = try_apply_sql(do, kwargs)
with qt_app():
from boadata.gui.qt import DataObjectWindow
window = DataObjectWindow(do)
window.show()
window.setWindowTitle(do.uri)
if __name__ == "__main__":
run_app()
|
thinkopensolutions/l10n-brazil | financial/models/financial_document_type.py | Python | agpl-3.0 | 599 | 0 | # -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from __future__ import division, print_function, unicode_literals
from odoo import fields, models
class Financial | DocumentType(models.Model):
_name = b'financial.document.type'
_description = 'Financial Document Type'
name = fields.Char(
string='Document Type',
size=30,
required=True,
index=True,
)
account_id = fields.Many2one(
c | omodel_name='financial.account',
string='Account',
ondelete='restrict',
)
|
curtisallen/Alarmageddon | alarmageddon/validations/cassandra.py | Python | apache-2.0 | 4,308 | 0.001625 | """Convenience Validations for working with Cassandra"""
from fabric.operations import run
from alarmageddon.validations.validation import Priority
from alarmageddon.validations.ssh import SshValidation
def _get_percentage(text):
"""Converts strings like '12.2' or '32.4%' into floating point numbers."""
text = text.strip()
if text.endswith('%'):
text = text[:-1]
return float(text)
class CassandraStatusValidation(SshValidation):
"""Validate that the Cassandra ring is within expected parameters.
Check that the specified Cassandra ring is in the specified
state and that the ring ownership of the nodes is within a certain
threshold.
    :param ssh_context: An SshContext instance, for accessing the hosts.
:param service_state: The expected service state value (defaults to
"UN").
:param | number_nodes: The expected number of cassandra nodes in the ring.
:param owns_threshold: The maximum percentage of the ring owned by a node.
:param priority: The Priority level of this validation.
:param timeout: How long to attempt to connect to the host.
:param hosts: The hosts to connect to.
    .. note::
This is not designed for multi reg | ion Cassandra clusters.
"""
def __init__(self, ssh_context, service_state="UN",
number_nodes=5, owns_threshold=40,
priority=Priority.NORMAL, timeout=None,
hosts=None):
super(CassandraStatusValidation,self).__init__(ssh_context,
"Cassandra nodetool status",
priority=priority,
timeout=timeout, hosts=hosts)
self.service_state = service_state
self.number_nodes = number_nodes
self.owns_threshold = owns_threshold
def perform_on_host(self, host):
"""Runs nodetool status and parses the output."""
output = run(
"nodetool status | " +
"egrep '([0-9]{1,3}\\.){3}[0-9]{1,3}' | " +
"awk 'BEGIN { FS = \" \" }; { print $1,$2,$5 }'")
if "Exception" in output:
self.fail_on_host(host, "An exception occurred while " +
"checking Cassandra cluster health on {0} ({1})"
.format((host, output)))
parsed = [line.split() for line in output.splitlines() if line.strip()]
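        # each entry is [state, address, owns], e.g. ['UN', '10.0.0.1', '33.3%']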
self.check(host, parsed)
def check(self, host, output):
"""Compares the results of nodetool status to the expected results."""
#Number of nodes check
if len(output) < self.number_nodes:
            self.fail_on_host(host,
                              "Cassandra cluster has {0} nodes but "
                              "should have {1}"
                              .format(len(output), self.number_nodes))
# Validate each node's properties in nodetool's output
for fields in output:
state = fields[0]
owns = fields[2]
node = fields[1]
# While a node is joining the cluster, don't check it for errors.
if state == 'UJ':
continue
#check for status
if state != self.service_state:
                self.fail_on_host(host,
                                  "Cassandra node {0} is in "
                                  "state {1} but the expected state is {2}"
                                  .format(node, state, self.service_state))
#check for owns threshold
try:
owns_value = _get_percentage(owns)
if owns_value > self.owns_threshold:
                    self.fail_on_host(host,
                                      "Cassandra node {0} owns {1} "
                                      "percent of the ring which exceeds "
                                      "the threshold of {2}"
                                      .format(node, owns_value,
                                              self.owns_threshold))
except ValueError:
self.fail_on_host(host,
"Expected nodetool to output an ownership " +
"percentage but got: {0}".format(owns))
|
stefraynaud/spanlib | scripts/quickview.py | Python | lgpl-2.1 | 398 | 0.017588 | #########################
# Simple netcdf plotter #
#########################
# Needed modules
import vcs, sys, cdms
# Arguments
if len(s | ys.argv) < 3:
    print('Usage: python quickview.py <filename> <varname>')
sys.exit(1)
filename = sys.argv[1]
varname = sys.argv[2]
# Open netcdf file
f | =cdms.open(filename)
# Read our variable
s=f(varname)
# Create vcs canvas
x=vcs.init()
# Plot it
x.plot(s) |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/solver/lr_scheduler.py | Python | apache-2.0 | 2,161 | 0.000925 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
if warmup_method not in ("constant", "linear", "mlperf_linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iter | s = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
d | ef get_lr(self):
warmup_factor = 1
# optional offset to each base_lr
delta = 0.
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
# MLPerf-specific warmup definition
elif self.warmup_method == "mlperf_linear":
delta = (self.warmup_iters - self.last_epoch) * self.warmup_factor
return [
(base_lr - delta)
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
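# Illustrative numbers (not from the original file): with "mlperf_linear",
# warmup_factor acts as an absolute lr offset per remaining warmup step.
# E.g. base_lr=0.04, warmup_iters=500, warmup_factor=4e-5 at last_epoch=0:
# delta = (500 - 0) * 4e-5 = 0.02, so the schedule starts at 0.04 - 0.02
# = 0.02 and climbs back to base_lr as last_epoch approaches warmup_iters.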
|
msegado/edx-platform | pavelib/paver_tests/test_database.py | Python | agpl-3.0 | 8,779 | 0.002962 | """
Tests for the Paver commands for updating test databases and its utility methods
"""
import os
import shutil
import tarfile
from tempfile import mkdtemp
from unittest import TestCase
import boto
from mock import call, patch, Mock
from pavelib import database
from pavelib.utils import db_utils
from pavelib.utils.db_utils import extract_files_from_zip
from pavelib.utils.envs import Env
from .utils import PaverTestCase
class TestPaverDbUtils(TestCase):
""" Tests for paver bokchoy database utils """
@patch('pavelib.utils.db_utils.verify_files_exist')
def test_extract_files_from_zip(self, _mock_verify):
test_dir = mkdtemp()
output_dir = mkdtemp()
self.addCleanup(shutil.rmtree, test_dir)
self.addCleanup(shutil.rmtree, output_dir)
tmp_file_name = os.path.join(test_dir, 'test.txt')
with open(tmp_file_name, 'w') as tmp_file:
tmp_file.write('Test file content')
tmp_tarfile = os.path.join(test_dir, 'test.tar.gz')
with tarfile.open(name=tmp_tarfile, mode='w:gz') as tar_file:
tar_file.add(tmp_file_name, arcname='test.txt')
extract_files_from_zip(['test.txt'], tmp_tarfile, output_dir)
extracted_file = os.path.join(output_dir, 'test.txt')
assert os.path.isfile(extracted_file)
with open(extracted_file) as test_file:
data = test_file.read()
assert data == 'Test file content'
def _write_temporary_db_cache_files(path, files):
"""
create some temporary files to act as the local db cache files so that
we can compute a fingerprint
"""
for index, filename in enumerate(files):
filepath = os.path.join(path, filename)
with open(filepath, 'w') as cache_file:
cache_file.write(str(index))
class TestPaverDatabaseTasks(PaverTestCase):
"""
Tests for the high level database tasks
"""
def setUp(self):
super().setUp()
# This value is the actual sha1 fingerprint calculated for the dummy
# files used in these tests
self.expected_fingerprint = 'ccaa8d8dcc7d030cd6a6768db81f90d0ef976c3d'
self.fingerprint_filename = '{}.tar.gz'.format(self.expected_fingerprint)
self.bucket = Mock(name='test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_from_local_cache(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is the same as
the stored fingerprint, verify that we make a call to load data into
the database without running migrations
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# write the local fingerprint file with the same value than the
# computed fingerprint
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(self.expected_fingerprint)
with patch.object(db_utils, 'get_file_from_s3', wraps=db_utils.get_file_from_s3) as _mock_get_file:
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
# Make sure that the local cache files are used - NOT downloaded from s3
self.assertFalse(_mock_get_file.called)
calls = [
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_from_s3_fingerprint(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is a matching fingerprint file
in s3, verify that we make a call to load data into the database
without running migrations
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# zip the temporary files and push them to s3 bucket
zipfile_path = os.path.join(db_utils.CACHE_FOLDER, self.fingerprint_filename)
with tarfile.open(name=zipfile_path, mode='w:gz') as tar_file:
for name in database.ALL_DB_FILES:
tar_file.add(os.path.join(db_utils.CACHE_FOLDER, name), arcname=name)
key = boto.s3.key.Key(bucket=self.bucket, name=self.fingerprint_filename)
key.set_contents_from_filename(zipfile_path, replace=False)
# write the local fingerprint file with a different value than
# the computed fingerprint
local_fingerprint = '123456789'
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(local_fingerprint)
with patch('boto.connect_s3', Mock(return_value=Mock())):
with patch.object(db_utils, 'get_file_from_s3') as _mock_get_file:
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
# Make sure that the fingerprint file is downloaded from s3
_mock_get_file.assert_called_once_with(
'test_bucket', self.fingerprint_filename, db_utils.CACHE_FOLDER
)
calls = [
call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_and_run_migrations(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is NO matching fingerprint file
in s3, verify that we make a call to load data into the database, run
migrations and update the local db cache files
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# write the local fingerprint file with a different value than
# the computed fingerprint
local_fingerprint = '123456789'
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(local_fingerprint)
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
calls = [
            call('{}/scripts/reset-test-db.sh --calculate_migrations'.format(Env.REPO_ROOT)),
call('{}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db'.format(Env.REPO_ROOT))
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
    @patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_updated_db_cache_pushed_to_s3(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is NO matching fingerprint file
        in s3, verify that an updated fingerprint file is pushed to s3
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOL |
junkoda/fs2 | test/test_pm_force.py | Python | gpl-3.0 | 990 | 0 | #
# Test PM force parallelisation:
# check force does not depend on number of MPI nodes
import fs
import numpy as np
import h5py
import pm_setup
# read reference file
# $ python3 create_force_h5.py to create
file = h5py.File('force_%s.h5' % fs.config_precision(), 'r')
ref_id = file['id'][:]
ref_force = file['f'][:]
file.close()
# compute PM force
fs.msg.set_loglevel(0)
particles = pm_setup.force()
particle_id = particles.id
particle_force = particles.force
# compare two forces
if fs.comm.this_node() == 0:
assert(np.all(particle_id == ref_id))
print('pm_force id OK')
force_rms = np.std(ref_force)
diff = particle_force - ref_force
diff_rms = np.std(diff)
print('pm_force rms error %e / %e' % (diff_rms, force_rms))
diff_max = np.max(np.abs(diff))
print('pm_force max error %e / %e' % (diff_max, force_rms))
eps = np.finfo(particle_force.dtype).eps
assert(diff_rms < 20*eps)
assert(diff_max < 1000*eps)
print('pm_force OK')
|
gencer/python-phonenumbers | python/phonenumbers/shortdata/region_LY.py | Python | apache-2.0 | 556 | 0.008993 | """Auto-generated file, do not edit by hand. LY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_LY = PhoneMetadata(id='LY', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='19[013]', example_number='193', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='19[013]', example_number='193', possible_length=(3,)),
    short_data=True)
|
jamarrange/sort | build/fileSort/Arranger.py | Python | mit | 12,285 | 0.003093 | '''
|--------------------------------------------------------------------------
|
| Jam arrange: GUI linked to arrangement algorithm
| Author: Victor Motha
| Copyright 2016
| Objective: Scan audio files and sort them by artist name.
| Current stable version: 0.0.4
|
'''
'''
|--------------------------------------------------------------------------
| Importing built-in and External packages:
|--------------------------------------------------------------------------
|
| This is where we import all our built-in python packages, such as os,
| and shutil to allow for access to OS dir and file manipulation. We
| also import the TinyTag external package to access file meta-data.
|
'''
import os
import os.path
# from os import listdir
from os.path import join # isfile
import shutil
from tinytag import TinyTag
'''
|--------------------------------------------------------------------------
| Arranging algorithm:
|--------------------------------------------------------------------------
|
| This is where we call all functions linked to arranging algorithm
| through the - Arranger - class.
|
'''
class Arranger(object):
"""Arranging algorithm for jamarrange."""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
'''
|--------------------------------------------------------------------------
    | Replacing special characters: Dir names
|--------------------------------------------------------------------------
|
| This is where we replace all special characters when reading dir names
| to ensure python can evaluate through these files as strings and
| escape these characters.
|
'''
def replace_special_chars_files(self, temp):
reserved_chars = ['"',"'"]
for i in range(len(reserved_chars)):
if reserved_chars[i] in temp:
temp = temp.replace(reserved_chars[i],' ')
return temp
'''
|--------------------------------------------------------------------------
    | Replacing special characters: File names
|--------------------------------------------------------------------------
|
| This is where we replace all special characters when reading file names
| to ensure python can evaluate through these files as strings and
| escape these characters. This is also to ensure we conform to
| OS directory naming conventions as is highlighted in the
| - directory_handling - tutorial.
|
'''
def replace_special_chars(self, temp):
reserved_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*'] # see tutorial for explanation
if temp is not None:
for i in range(len(reserved_chars)):
if reserved_chars[i] in temp:
temp = temp.replace(reserved_chars[i], ' ')
else:
pass
return temp
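    # Example with a hypothetical value: replace_special_chars('AC/DC: Live?')
    # returns 'AC DC  Live ' -- every reserved character is blanked, so the
    # result conforms to the OS directory naming rules referenced above.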
'''
|--------------------------------------------------------------------------
| Meta-data collection:
|--------------------------------------------------------------------------
|
| This is where we collect all audio file details in user selected
| directory.
|
'''
def collect_audio(self, target):
'''
|--------------------------------------------------------------------------
| Find all files:
|--------------------------------------------------------------------------
|
| This is where we find all files in dir and subdirs.
|
'''
root = target
path = os.path.join(root, "targetdirectory")
all_files_in_dir = []
all_files_dir = []
for path, subdirs, files in os.walk(root):
for name in files:
all_files_in_dir.append(self.replace_special_chars(str(name)))
all_files_dir.append(path)
audio_files = []
'''
|--------------------------------------------------------------------------
| Extracted Audio file details:
|--------------------------------------------------------------------------
|
| This is the data we collect in order as listed below:
| 1. Artist
| 2. Title (song name)
| 3. Album title
| 4. Path on local storage
| 5. Track number
| 6. Total number of tracks on specific album
| 7. Tracking id generation
|
'''
audio_file_deets = [[], [], [], [], [], [], []]
files_not_parsed = []
total_files_parsed = 0 # Keeping track of total number of media files in dir
count = 0
for i in range(len(all_files_in_dir)):
if all_files_in_dir[i].endswith(('.mp3', '.wav', '.MP3', '.wma', '.WMA', '.WAV', '.mp4', '.MP4')) == True:
                total_files_parsed += 1 # counting each found media file
temp = os.path.join(all_files_dir[i],str(all_files_in_dir[i]))
current_audiofile = TinyTag.get(temp)
if current_audiofile is not None:
curr_artist = self.replace_special_chars(current_audiofile.artist)
if curr_artist == '':
curr_artist = None
audio_file_deets[0].append(curr_artist)
else:
audio_file_deets[0].append(curr_artist)
audio_file_deets[1].append(current_audiofile.title)
audio_file_deets[2].append(current_audiofile.album)
audio_file_deets[3].append(temp)
audio_file_deets[4].append(current_audiofile.track)
audio_file_deets[5].append(current_audiofile.track_total)
audio_file_deets[6].append(count)
count = count + 1
audio_files.append(all_files_in_dir[i])
if current_audiofile == None:
files_not_parsed.append(temp)
return audio_file_deets, files_not_parsed, all_files_in_dir, all_files_dir, total_files_parsed
'''
|--------------------------------------------------------------------------
| Meta-data sorting:
|--------------------------------------------------------------------------
|
    | This is where we make a list of all known meta-data, for later use
    | when creating song storage folders.
    |
'''
def music_handling(self, audio_file_deets):
artist_names = []
for i in range(len(audio_file_deets[0])):
'''
|--------------------------------------------------------------------------
| Potential Bug: Unicode Testing Required
|--------------------------------------------------------------------------
|
| Unicode testing function must enter here once completed, e.g:
| isinstance(a, unicode)
|
'''
if audio_file_deets[0][i] is not None:
artist_names.append(str(audio_file_deets[0][i].strip()
.encode('utf-8')))
if audio_file_deets[0][i] is None:
artist_names.append(i)
folder_titles = []
unknown_songs = [[],[]]
for i in range(len(artist_names)):
temp = [x for x, val in enumerate(artist_names)
if val == artist_names[i]]
if len(temp) > 1:
if artist_names[i] in folder_titles:
pass
else:
folder_titles.append(artist_names[i])
elif len(temp) == 1:
temp2 = isinstance(artist_names[i], str)
if temp2 == True:
folder_titles.append(artist_names[i])
else:
unknown_songs[0].append(artist_names[i])
unknown_songs[1].append(audio_file_deets[3][i])
return artist_names, folder_titles, unknown_songs
'''
|--------------------------------------------------------------------------
| Make Artist Directories:
|--------------------------------------------------------------------------
|
|
Lorquas/subscription-manager | test/rhsmlib_test/test_products.py | Python | gpl-2.0 | 14,073 | 0.001279 | from __future__ import print_function, division, absolute_import
# Copyright (c) 2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
import dbus
import mock
import json
import datetime
from test.rhsmlib_test.base import DBusObjectTest, InjectionMockingTest
from subscription_manager import injection as inj
from subscription_manager.cert_sorter import CertSorter
from subscription_manager.validity import ValidProductDateRangeCalculator
from subscription_manager.cp_provider import CPProvider
from test import stubs
from rhsm import connection
from rhsmlib.dbus.objects import ProductsDBusObject
from rhsmlib.dbus import constants
from rhsmlib.services import products
START_DATE = datetime.datetime.now() - datetime.timedelta(days=100)
NOW_DATE = datetime.datetime.now()
END_DATE = datetime.datetime.now() + datetime.timedelta(days=265)
NO_CONTENT_JSON = [{
"id": "4028fa7a5da1fbc201 | 5da203aba209b7",
"uuid": "57b7dbff-9489-43ac-991a-b848324b423a",
"name": "localhost.localdomain",
"username": "admin",
"entitlementStatus": "valid",
"serviceLevel": "",
"releaseVer": {
"releaseVer": None
},
"idCert": {
"key": "FAKE RSA PRIVATE KEY",
"cert": "FAKE CERTIFICATE",
"serial": {
"id": 8134386700568860251,
"revoked": False,
"collected": False,
"expiration": END_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"serial": 8134386700568860251,
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
},
"id": "4028fa7a5da1fbc2015da203ad8c09b9",
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
},
"type": {
"id": "1000",
"label": "system",
"manifest": False
},
"owner": {
"id": "4028fa7a5da1fbc2015da1fdb5380004",
"key": "admin",
"displayName": "Admin Owner",
"href": "/owners/admin"
},
"environment": None,
"entitlementCount": 0,
"facts": {},
"lastCheckin": NOW_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"installedProducts": [],
"canActivate": False,
"capabilities": [],
"hypervisorId": None,
"contentTags": [],
"autoheal": True,
"contentAccessMode": None,
"recipientOwnerKey": None,
"annotations": None,
"href": "/consumers/57b7dbff-9489-43ac-991a-b848324b423a",
"dev": False,
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
}]
CONTENT_JSON = [{
"id": "4028fa7a5da1fbc2015da203aba209b7",
"uuid": "57b7dbff-9489-43ac-991a-b848324b423a",
"name": "localhost.localdomain",
"username": "admin",
"entitlementStatus": "valid",
"serviceLevel": "",
"releaseVer": {
"releaseVer": None
},
"idCert": {
"key": "FAKE RSA PRIVATE KEY",
"cert": "FAKE CERTIFICATE",
"serial": {
"id": 8134386700568860251,
"revoked": False,
"collected": False,
"expiration": END_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"serial": 8134386700568860251,
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
},
"id": "4028fa7a5da1fbc2015da203ad8c09b9",
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
},
"type": {
"id": "1000",
"label": "system",
"manifest": False
},
"owner": {
"id": "4028fa7a5da1fbc2015da1fdb5380004",
"key": "admin",
"displayName": "Admin Owner",
"href": "/owners/admin"
},
"environment": None,
"entitlementCount": 0,
"facts": {},
"lastCheckin": "2017-08-02T08:16:39+0000",
"installedProducts": [
{
"id": "8a99f9895d8b4f96015d9db99e9971fb",
"productId": "69",
"productName": "Red Hat Enterprise Linux Server",
"version": "7.4",
"arch": "x86_64",
"status": "green",
"startDate": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"endDate": END_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
},
{
"id": "8a99f9895d8b4f96015d9db99e9971fc",
"productId": "70",
"productName": "Red Hat Enterprise Linux Server - Extended Update Support",
"version": "7.2",
"arch": "x86_64",
"status": "green",
"startDate": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"endDate": END_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
}
],
"canActivate": False,
"capabilities": [],
"hypervisorId": None,
"contentTags": [],
"autoheal": True,
"contentAccessMode": None,
"recipientOwnerKey": None,
"annotations": None,
"href": "/consumers/57b7dbff-9489-43ac-991a-b848324b423a",
"dev": False,
"created": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000"),
"updated": START_DATE.strftime("%Y-%m-%dT%H:%M:%S+0000")
}]
class TestProductService(InjectionMockingTest):
def setUp(self):
super(TestProductService, self).setUp()
self.mock_cert_sorter = mock.Mock(spec=CertSorter, name="CertSorter")
self.mock_cp = mock.Mock(spec=connection.UEPConnection, name="UEPConnection")
self.mock_calculator = mock.Mock(spec=ValidProductDateRangeCalculator, name="ValidProductDateRangeCalculator")
def injection_definitions(self, *args, **kwargs):
if args[0] == inj.CERT_SORTER:
return self.mock_cert_sorter
elif args[0] == inj.PRODUCT_DATE_RANGE_CALCULATOR:
return self.mock_calculator
else:
return None
def _create_rhel74_cert(self):
return self._create_cert("69", "Red Hat Enterprise Linux Server",
"7.4", "rhel-7,rhel-7-server")
def _create_rhel72_ues_cert(self):
return self._create_cert("70", "Red Hat Enterprise Linux Server - Extended Update Support",
"7.2", "rhel-7-eus-server,rhel-7-server")
@staticmethod
def _create_cert(product_id, name, version, provided_tags):
cert = stubs.StubProductCertificate(
product=stubs.StubProduct(
product_id=product_id,
name=name,
version=version,
provided_tags=provided_tags
),
start_date=START_DATE,
end_date=END_DATE
)
cert.delete = mock.Mock()
cert.write = mock.Mock()
return cert
def test_list_no_installed_products(self):
self.mock_cp.getConsumer.return_value = NO_CONTENT_JSON
self.mock_cert_sorter.installed_products = []
result = products.InstalledProducts(self.mock_cp).list()
self.assertEqual([], result)
def test_list_installed_products_without_filter(self):
self.mock_cp.getConsumer.return_value = CONTENT_JSON
self.mock_cert_sorter.reasons = mock.Mock()
self.mock_cert_sorter.reasons.get_product_reasons = mock.Mock(return_value=[])
self.mock_cert_sorter.get_statu |
ivngithub/testproject | config.py | Python | mit | 1,559 | 0.000641 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
PROJECT_MAIL_SUBJECT_PREFIX = '[Project]'
PROJECT_MAIL_SENDER = 'Project Admin <project@example.com>'
PROJECT_ADMIN = os.environ.get('PROJECT_ADMIN')
CELERY_BROKER_URL = 'amqp://localhost//'
CELERY_RESULT_BACKEND = 'amqp://'
CELERY_INCLUDE = ['celery_worker']
SQL_USERNAME = os.environ.get('MYSQL_USERNAME')
SQL_PASSWORD = os.environ.get('MYSQL_PASSWORD')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://' + str(Config.SQL_USERNAME) + ':' + str(
Config.SQL_PASSWORD) + '@localhost/testproject'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
    'production': ProductionConfig,
'default': DevelopmentConfig
}
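# A minimal usage sketch (assumes a standard Flask application factory, which
# is not part of this file):
#
#     from flask import Flask
#     app = Flask(__name__)
#     cfg = config[os.environ.get('APP_CONFIG', 'default')]
#     app.config.from_object(cfg)
#     cfg.init_app(app)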
|
m00dawg/holland | holland/core/log.py | Python | bsd-3-clause | 1,274 | 0.007849 | import os
import sys
import logging
__all__ = [
'clear_root_handlers',
'setup_console_logging',
'setup_file_logging'
]
DEFAULT_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'
DEFAULT_LOG_FORMAT = '%(asctime)s [%(levelname)s] %(message)s'
DEFAULT_LOG_LEVEL = logging.INFO
class NullHandler(logging.Handler):
def emit(self, record):
pass
def clear_root_handlers():
    root = logging.getLogger()
    # iterate over a copy; the old map() idiom is lazy on Python 3 and would
    # silently remove nothing
    for handler in list(root.handlers):
        root.removeHandler(handler)
def setup_console_logging(level=DEFAULT_LOG_LEVEL,
format='%(message)s',
datefmt=DEFAULT_DATE_FORMAT):
    root = logging.getLogger()
root.setLevel(level)
    handler = logging.StreamHandler()
formatter = logging.Formatter(format, datefmt)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
def setup_file_logging(filename,
level=DEFAULT_LOG_LEVEL,
format=DEFAULT_LOG_FORMAT,
datefmt=DEFAULT_DATE_FORMAT):
root = logging.getLogger()
root.setLevel(level)
handler = logging.FileHandler(filename, 'a', encoding='utf8')
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
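# Minimal demo (not part of the original module): run this file directly to
# exercise the console helper above.
if __name__ == '__main__':
    clear_root_handlers()
    setup_console_logging()
    logging.getLogger(__name__).info('console logging configured')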
|
libvirt/libvirt-test-API | libvirttestapi/repos/domain/hostname.py | Python | gpl-2.0 | 1,070 | 0.000935 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# To test "virsh hostname" command
from libvirttestapi.utils import process
required_params = ()
optional_params = {}
VIRSH_HOSTNAME = "virsh hostname"
def hostname(params):
"""check virsh hostname command
"""
logger = params['logger']
ret = process.run(VIRSH_HOSTNAME, shell=True, ignore_status=True)
if ret.exit_status:
logger.error("executing " + "\"" + VIRSH_HOSTNAME + "\"" + " failed")
return 1
virsh_ret = ret.stdout
logger.info("the output of " + "\"" + VIRSH_HOSTNAME + "\"" + " is %s" % virsh_ret)
ret = process.run("hostname", shell=True, ignore_status=True)
if ret.exit_status:
logger.error("executing " + "\"" + "hostname" + "\"" + " failed")
return 1
host_ret = ret.stdout
if virsh_ret[:-1] != host_ret:
logger.error("the output of " + VIRSH_HOSTNAME + " is not right")
return 1
else:
logger.info(VIRSH_HOSTNAME + " testing succeeded")
return 0
|
lcvisser/task-chrono | util.py | Python | mit | 4,240 | 0.004953 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Ludo Visser
#
# task-chrono is distributed under the terms and conditions of the MIT license.
# The full license can be found in the LICENSE file.
import numpy
import os
import StringIO as sio
# Useful enumeration class
class Enum:
def __init__(self, *sequential, **named):
enumerated = dict(zip(sequential, range(len(sequential))), **named)
for name, value in enumerated.iteritems():
setattr(self, name, value)
# Define colors
GREEN = (0.43,0.92,0.80)
RED = (0.92,0.43,0.43)
DARK_BLUE = (0.43, 0.43, 0.92)
LIGHT_BLUE = (0.77, 0.9, 1.0)
# Try to load matplotlib
mpl_available = None
if __name__ == '__main__':
# Running as script
os.chdir(os.environ['HOME'])
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
mpl_available = True
else:
# Running on GAE
# Set-up environment for matplotlib
os.environ['MATPLOTLIBDATA'] = os.getcwdu()
os.environ['MPLCONFIGDIR'] = os.getcwdu()
# Try to import matplotlib
mpl_available = True
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
except ImportError:
# Running on GAE Development server
mpl_available = False
# PNG generator for estimation statistics
def generate_est_png(tasks, number=10, display=False):
if len(tasks) > 0 and mpl_available:
# Allocate arrays
N = len(tasks)
x = numpy.arange(N)
lbe = numpy.zeros(N)
errors = numpy.zeros(N)
average = numpy.zeros(N)
sigma_minus = numpy.zeros(N)
sigma_plus = numpy.zeros(N)
colors = []
# Plot properties
M = max([0, N - number]) # index of first task to show
w = 0.4 # width of the bars
# Compute values
for i, task in enumerate(tasks):
lbe[i] = i - w/2
errors[i] = (task.duration - task.estimate) / 60.0 # in minutes
average[i] = numpy.average(errors[:i+1]) / (i + 1)
sigma_minus[i] = average[i] - numpy.std(errors[:i+1])
sigma_plus[i] = average[i] + numpy.std(errors[:i+1])
if errors[i] <= 0:
# Logged duration was less than estimate
colors.append(GREEN)
else:
# Logged duration was more than estimate
colors.append(RED)
# Create plot
fig = Figure()
canvas = FigureCanvas(fig)
ax = fig.add_subplot(1, 1, 1)
ax.hold(True)
ax.set_xlim(M-w/2, N-1+w/2)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_ylim(min([-10, numpy.min(errors)-10]), max([180, numpy.max(errors)+10]))
ax.set_ylabel('Estimation error [minutes]')
ax.grid(axis='y')
# Plot data
        ax.plot(x[M:], average[M:], color=DARK_BLUE)
        ax.plot(x[M:], sigma_minus[M:], color=LIGHT_BLUE)
ax.plot(x[M:], sigma_plus[M:], color=LIGHT_BLUE)
ax.fill_between(x[M:], y1=sigma_minus[M:], y2=sigma_plus[M:], color=LIGHT_BLUE)
ax.bar(lbe[M:], errors[M:], w, color=colors[M:], alpha=0.4)
# Create labels
for x, task in enumerate(tasks[M:]):
ax.text(x+M, 1, task.name, rotation=90, ha='center', va='bottom')
# Output
if not display:
# Output as base64-encoded string
im = sio.StringIO()
fig.savefig(im, format='png')
png = im.getvalue().encode('base64').strip()
return png
else:
# Show on screen
canvas.show()
return None
else:
return None
# Unit test
if __name__ == '__main__':
import random
class Task:
def __init__(self, n, e, d):
self.name = n
self.estimate = e
self.duration = d
tasks = []
for i in range(25):
tasks.append(Task('Task with ID' + str(i),
random.normalvariate(3600, 20),
random.normalvariate(3600, 20)))
generate_est_png(tasks, display=True)
|
SGenheden/Scripts | Membrane/build_lipid.py | Python | mit | 3,765 | 0.007437 | # Author: Samuel Genheden, samuel.genheden@gmail.com
"""
Program to build lipids from a template, similarly to MARTINI INSANE
Is VERY experimental!
"""
import argparse
import os
import xml.etree.ElementTree as ET
import numpy as np
from sgenlib import pdb
class BeadDefinition(object):
def __init__(self):
self.name = None
self.xyz = None
def parse(self, element):
if "name" in element.attrib:
self.name = element.attrib["name"]
else:
return
if "xyz" in element.attrib:
self.xyz = np.array(element.attrib["xyz"].split(), dtype=float)
def __str__(self):
return "%s (%s)"%(self.name,",".join("%.2f"%c for c in self.xyz))
class LipidTemplate(object):
def __init__(self):
        self.name = None
self.beads = []
self.headname = []
self.tailname = []
self.head = []
self.tail = []
def make(self, bd=3.0):
struct = pdb.PDBFile()
res = pdb.Residue()
for i, bead in enumerate(self.beads):
            atom = pdb.Atom()
atom.idx = i
atom.serial = i + 1
atom.name = bead.name
atom.resname = self.name
atom.residue = 1
atom.set_xyz(bead.xyz*bd)
res.atoms.append(atom)
struct.atoms.append(atom)
struct.residues.append(res)
allcoord = np.asarray([a.xyz for a in struct.atoms])
offset = allcoord.mean(axis=0) + 50.0
for a in struct.atoms:
a.set_xyz(a.xyz+offset)
struct.box = np.asarray([100,100,100])
return struct
def parse(self, element):
if "name" in element.attrib:
self.name = element.attrib["name"]
else:
return
if "head" in element.attrib:
self.headname = element.attrib["head"].split()
if "tail" in element.attrib:
self.tailname = element.attrib["tail"].split()
for child in element:
if child.tag != "bead":
continue
b = BeadDefinition()
b.parse(child)
if b.name is not None:
self.beads.append(b)
if b.name in self.headname :
self.head.append(b)
elif b.name in self.tailname :
self.tail.append(b)
def __str__(self):
return self.name+"\n\t"+"\n\t".join(b.__str__() for b in self.beads)
class LipidCollection(object):
def __init__(self):
self.lipids = {}
def load(self, filename):
tree = ET.parse(filename)
# Parse lipids
for child in tree.getroot():
if child.tag != "lipid":
continue
lipid = LipidTemplate()
lipid.parse(child)
if lipid.name is not None:
self.lipids[lipid.name] = lipid
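# The XML consumed by load()/parse() above is expected to look roughly like
# this (hypothetical lipid and bead names; only the attributes actually read
# by the parsers are shown):
#
#   <lipids>
#     <lipid name="POPC" head="NC3 PO4" tail="C4A C4B">
#       <bead name="NC3" xyz="0 0 10" />
#       <bead name="PO4" xyz="0 0 9" />
#     </lipid>
#   </lipids>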
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="Building lipids from templates")
parser.add_argument('-l','--lipid',help="the lipid to build")
parser.add_argument('-x','--xml',help="the definition of templates")
parser.add_argument('-o','--out',help="the output name",default="lipid.pdb")
parser.add_argument('--bd',type=float,help="the spacing between beads",default=3.0)
args = parser.parse_args()
lipidbook = LipidCollection()
if args.xml is None:
thispath = os.path.dirname(os.path.abspath(__file__))
args.xml = os.path.join(thispath,"lipid_templates.xml")
lipidbook.load(args.xml)
if args.lipid in lipidbook.lipids:
struct = lipidbook.lipids[args.lipid].make(bd=args.bd)
struct.write(args.out)
else:
"%s not in the XML file"%args.lipid
|
quantopian/zipline | zipline/assets/asset_db_schema.py | Python | apache-2.0 | 4,743 | 0 | import sqlalchemy as sa
# Define a version number for the database generated by these writers
# Increment this version number any time a change is made to the schema of the
# assets database
# NOTE: When upgrading this remember to add a downgrade in:
# .asset_db_migrations
ASSET_DB_VERSION = 7
# A frozenset of the names of all tables in the assets db
# NOTE: When modifying this schema, update the ASSET_DB_VERSION value
asset_db_table_names = frozenset({
'asset_router',
'equities',
'equity_symbol_mappings',
'equity_supplementary_mappings',
'futures_contracts',
'exchanges',
'futures_root_symbols',
'version_info',
})
metadata = sa.MetaData()
exchanges = sa.Table(
'exchanges',
metadata,
sa.Column(
'exchange',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('canonical_name', sa.Text, nullable=False),
sa.Column('country_code', sa.Text, nullable=False),
)
equities = sa.Table(
'equities',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
    sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
    sa.Column('first_traded', sa.Integer),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text, sa.ForeignKey(exchanges.c.exchange)),
)
equity_symbol_mappings = sa.Table(
'equity_symbol_mappings',
metadata,
sa.Column(
'id',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column(
'sid',
sa.Integer,
sa.ForeignKey(equities.c.sid),
nullable=False,
index=True,
),
sa.Column(
'symbol',
sa.Text,
nullable=False,
),
sa.Column(
'company_symbol',
sa.Text,
index=True,
),
sa.Column(
'share_class_symbol',
sa.Text,
),
sa.Column(
'start_date',
sa.Integer,
nullable=False,
),
sa.Column(
'end_date',
sa.Integer,
nullable=False,
),
)
equity_supplementary_mappings = sa.Table(
'equity_supplementary_mappings',
metadata,
sa.Column(
'sid',
sa.Integer,
sa.ForeignKey(equities.c.sid),
nullable=False,
primary_key=True
),
sa.Column('field', sa.Text, nullable=False, primary_key=True),
sa.Column('start_date', sa.Integer, nullable=False, primary_key=True),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('value', sa.Text, nullable=False),
)
futures_root_symbols = sa.Table(
'futures_root_symbols',
metadata,
sa.Column(
'root_symbol',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('root_symbol_id', sa.Integer),
sa.Column('sector', sa.Text),
sa.Column('description', sa.Text),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey(exchanges.c.exchange),
),
)
futures_contracts = sa.Table(
'futures_contracts',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text, unique=True, index=True),
sa.Column(
'root_symbol',
sa.Text,
sa.ForeignKey(futures_root_symbols.c.root_symbol),
index=True
),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey(exchanges.c.exchange),
),
sa.Column('notice_date', sa.Integer, nullable=False),
sa.Column('expiration_date', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer, nullable=False),
sa.Column('multiplier', sa.Float),
sa.Column('tick_size', sa.Float),
)
asset_router = sa.Table(
'asset_router',
metadata,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True),
sa.Column('asset_type', sa.Text),
)
version_info = sa.Table(
'version_info',
metadata,
sa.Column(
'id',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column(
'version',
sa.Integer,
unique=True,
nullable=False,
),
# This constraint ensures a single entry in this table
sa.CheckConstraint('id <= 1'),
)
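# A minimal sketch of materialising the schema (in-memory SQLite purely for
# illustration; any SQLAlchemy engine would do):
#
#     engine = sa.create_engine('sqlite://')
#     metadata.create_all(engine)
#     assert asset_db_table_names <= set(metadata.tables)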
|
zubie7a/Algorithms | CodeSignal/Arcade/The_Core/Level_03_Corner_Of_Zeros_And_Ones/017_Kill_Kth_Bit.py | Python | mit | 492 | 0 | # https://app.codesignal.com/arcade/code-arcade/corner-of-0s-and-1s/b5z4P2r2CGCtf8HCR
def killKthBit(n, k):
# Use bit operators to turn off the k-th bit from the right.
# First create a value with the bit at the position turned on
# and everything else off. Then flip that value so all bits
# are 1 except the one in position. Then, 'bitwise and' with
    # this value will leave all bits untouched except the one in
# the desired position.
return n & ~(1 << (k - 1))
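# Quick worked examples: with n = 0b10111 (23) and k = 2 the mask is
# ~(1 << 1) = ...11101, so the second bit from the right is cleared.
assert killKthBit(0b10111, 2) == 0b10101
assert killKthBit(0b10101, 1) == 0b10100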
|
tdda/tdda | tdda/constraints/db/constraints.py | Python | mit | 17,342 | 0.000115 | # -*- coding: utf-8 -*-
"""
TDDA constraint discovery and verification is provided for a number
of DB-API (PEP-0249) compliant databases, and also for a number of other
(NoSQL) databases.
The top-level functions are:
:py:func:`tdda.constraints.discover_db_table`:
Discover constraints from a single database table.
:py:func:`tdda.constraints.verify_db_table`:
Verify (check) a single database table, against a set of previously
discovered constraints.
:py:func:`tdda.constraints.detect_db_table`:
For detection of failing records in a single database table,
but not yet implemented for databases.
"""
import sys
from tdda.constraints.base import (
DatasetConstraints,
Verification,
)
from tdda.constraints.baseconstraints import (
BaseConstraintCalculator,
BaseConstraintDetector,
BaseConstraintVerifier,
BaseConstraintDiscoverer,
MAX_CATEGORIES,
)
from tdda.constraints.db.drivers import DatabaseHandler
from tdda import rexpy
if sys.version_info[0] >= 3:
long = int
class DatabaseConstraintCalculator(BaseConstraintCalculator):
def __init__(self, tablename, testing=False):
self.tablename = tablename
self.testing = testing
def is_null(self, value):
return self.db_value_is_null(value)
def to_datetime(self, value):
return self.db_value_to_datetime(value)
def column_exists(self, colname):
return colname in self.get_column_names()
def get_column_names(self):
return self.get_database_column_names(self.tablename)
def get_nrecords(self):
return self.get_database_nrows(self.tablename)
def types_compatible(self, x, y, colname=None):
return types_compatible(x, y, colname if not self.testing else None)
def calc_min(self, colname):
return self.get_database_min(self.tablename, colname)
def calc_max(self, colname):
return self.get_database_max(self.tablename, colname)
def calc_min_length(self, colname):
return self.get_database_min_length(self.tablename, colname)
def calc_max_length(self, colname):
return self.get_database_max_length(self.tablename, colname)
def calc_tdda_type(self, colname):
return self.get_database_column_type(self.tablename, colname)
def calc_null_count(self, colname):
return self.get_database_nnull(self.tablename, colname)
def calc_non_null_count(self, colname):
return self.get_database_nnonnull(self.tablename, colname)
def calc_nunique(self, colname):
return self.get_database_nunique(self.tablename, colname)
def calc_unique_values(self, colname, include_nulls=True):
return self.get_database_unique_values(self.tablename, colname,
include_nulls=include_nulls)
def calc_non_integer_values_count(self, colname):
raise Exception('database should not require non_integer_values_count')
def calc_all_non_nulls_boolean(self, colname):
raise Exception('database should not require all_non_nulls_boolean')
def find_rexes(self, colname, values=None, seed=None):
if not values:
values = self.get_database_unique_values(self.tablename, colname)
return rexpy.extract(sorted(values), seed=seed)
def calc_rex_constraint(self, colname, constraint, detect=False):
return not self.get_database_rex_match(self.tablename, colname,
constraint.value)
class DatabaseConstraintDetector(BaseConstraintDetector):
"""
No-op implementation of the Constraint Detector methods for
databases.
"""
def __init__(self, tablename):
pass
class DatabaseConstraintVerifier(DatabaseConstraintCalculator,
DatabaseConstraintDetector,
BaseConstraintVerifier,
DatabaseHandler):
"""
A :py:class:`DatabaseConstraintVerifier` object provides methods
for verifying every type of constraint against a single database table.
"""
def __init__(self, dbtype, db, tablename, epsilon=None,
type_checking='strict', testing=False):
"""
Inputs:
*dbtype*:
Type of database.
*db*:
A DB-API database connection object (as obtained from
a call to the connect() method on the underlying database
driver).
*tablename*:
A table name, referring to a table that exists in the
database and is accessible. It can either be a simple
name, or a schema-qualified name of the form `schema.name`.
"""
DatabaseHandler.__init__(self, dbtype, db)
tablename = self.resolve_table(tablename)
DatabaseConstraintCalculator.__init__(self, tablename, testing)
DatabaseConstraintDetector.__init__(self, tablename)
BaseConstraintVerifier.__init__(self, epsilon=epsilon,
type_checking=type_checking)
class DatabaseVerification(Verification):
"""
A :py:class:`DatabaseVerification` object is the variant of
the :py:class:`tdda.constraints.base.Verification` object used for
verification of constraints on a database table.
"""
def __init__(self, *args, **kwargs):
Verification.__init__(self, *args, **kwargs)
class DatabaseConstraintDiscoverer(DatabaseConstraintCalculator,
BaseConstraintDiscoverer,
DatabaseHandler):
"""
A :py:class:`DatabaseConstraintDiscoverer` object is used to discover
constraints on a single database table.
"""
def __init__(self, dbtype, db, tablename, inc_rex=False, seed=None):
DatabaseHandler.__init__(self, dbtype, db)
tablename = self.resolve_table(tablename)
DatabaseConstraintCalculator.__init__(self, tablename)
BaseConstraintDiscoverer.__init__(self, inc_rex=inc_rex, seed=seed)
self.tablename = tablename
def types_compatible(x, y, colname):
"""
Returns boolean indicating whether the coarse_type of *x* and *y* are
the same, for scalar values. The int and long types are considered to
be the same.
For databases, coarse types are pretty much the same as the column types,
except that different sizes of integer are all considered to be ints.
If *colname* is provided, and the check fails, a warning is issued
to stderr.
"""
tx = int if type(x) is long else type(x)
ty = int if type(y) is long else type(y)
ok = tx == ty
if not ok and colname:
print('Warning: Failing incompatible types constraint for field %s '
'of type %s.\n(Constraint value %s of type %s.)'
% (colname, type(x), y, type(y)), file=sys.stderr)
return ok
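# For example, types_compatible(1, 2, 'age') returns True, while
# types_compatible(1, 2.0, 'age') returns False and prints the warning above;
# on Python 2, int and long values count as the same coarse type.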
def verify_db_table(dbtype, db, tablename, constraints_path, epsilon=None,
type_checking='strict', testing=False, report='all',
**kwargs):
"""
Verify that (i.e. check whether) the database table provided
satisfies the constraints in the JSON .tdda file provided.
Mandatory Inputs:
*dbtype*:
Type of database.
*db*:
A database object
*tablename*:
                            A database table name, to be checked.
*constraints_path*:
The path to a JSON .tdda file (possibly
generated by the discover_constraints
                            function, below) containing constraints
to be checked.
Optional Inputs:
*epsilon*:
When checking minimum and maximum values
for numeric fields, this provides a
tolerance. The tolerance is a proportion
of the constraint value by which the
constraint can be exce |
etscrivner/pymemcache | pymemcache/errors.py | Python | bsd-3-clause | 465 | 0 | # -*- coding: utf-8 -*-
"""
pymemcache.errors
~~~~~~~~~~~~~~~~~
Exceptions base classes for pymemcache
"""
class Error(Exception):
"""Base exception for all pymemcache errors"""
class ConnectionError(Error):
"""Base class for any socket-level connection issues"""
class RequestError(Error):
"""Base class for errors related to the request conte | nts"""
class ResponseError(Error):
"""Base class for errors related to responses"""
|
frederick623/HTI | omm/merge_csv.py | Python | apache-2.0 | 3,078 | 0.022092 |
import sqlite3
import csv
def csv_to_arr(csv_file, start=1, has_header=True):
arr = []
with open(csv_file, 'rU') as f:
reader = csv.reader(f)
arr = list(reader)
if arr == []:
return
header = ""
if has_header:
header = ','.join(arr[0])
arr = arr[start:]
return header, arr
else:
return arr[start:]
return
def db_cur():
# Register the adapter
# sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
# sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
tbl_exists = cur.fetchone()
if tbl_exists[0] == 0:
cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
if index_arr is not None:
for index in index_arr:
cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
if arr is not None:
cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
return
def arr_to_csv(file_name, header, data_arr):
csv_file = open(file_name, 'wb')
wr = csv.writer(csv_file)
wr.writerow(header.split(','))
for data_row in data_arr:
line = []
for ele in data_row:
line.append(str(ele))
wr.writerow(line)
csv_file.close()
return
def question_marks(st):
question_marks = '?'
for i in range(0, len(st.split(','))-1):
question_marks = question_marks + ",?"
return question_marks
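# For example, question_marks("a,b,c") returns "?,?,?", matching the
# placeholder list that create_tbl() builds for its INSERT statement.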
new_header, new_arr = csv_to_arr("HORIZON_cash_execs_20170427.csv")
old_header, old_arr = csv_to_arr("HORIZON_cash_exec | s_20170427_old.csv")
conn, cur = db_cur()
create_tbl(cur, "new", new_header, new_arr)
create_tbl(cur, "old", old_header, old_arr)
# print new_header
cur.execute("""select new.TRADEID,new.PORTFOLIOID,new.BROKEREXCHANGEID,new.MIC,new.USERID,new.IMSID,new.ORDERID,new.WAY,new.QUANTITY,new.PRICE,new.TIMESTAMP,new.IMSUSERID,new.USERDATA,old.MARKETDATA,new.EXECUTIONSTATE,new.PRODUCTID,new.CONTEXT,new.TOHEDGE,new.SESSIONID,new.HEDGEULID,new.HEDGERATIO,new.HEDGEULSPOT,new.HEDGEPROXYULSPOT,new.EXCHANGEFEES,new.CLEARINGFEES,new.BROKERAGEFEES,new.CROSSTRADEID,new.FEESDATA,new.ISCOMPLETE,new.IGNOREINPOSITION,new.MARKETTRADEID,new.PRODUCTTOHEDGEFOREX,new.SHORTSELLTYPE,new.TRADETYPE,new.CURRENCYID,new.EVENTTIMESTAMP,new.VERSION,new.BROKERNAME,new.IMMEDIATEHEDGEPERCENT,new.IMMEDIATEHEDGESTRATEGY,new.ORIGINALORDERPRICE,new.ORIGINALORDERQUANTITY,new.ORDERTIMESTAMP,old.EXECMARKETDATA,new.SETTLEMENTDATE,new.SETTLEMENTFEES,new.COMMENTS,new.IMSCHANNELTYPE,new.PRODUCTTOPOSITIONFOREX,new.PRICINGSNAPSTATUS,new.HEDGESNAPSTATUS,new.FOREXSNAPSTATUS,new.HEDGEFOREXSNAPSTATUS,new.STRATEGYID,new.STRATEGYTRADEID,new.PEERCUSTOMFIELDS,new.PEERUID,new.BLOCKTRADETYPE
from old join new on new.TRADEID = old.TRADEID""")
arr = cur.fetchall()
arr_to_csv("x.csv", new_header, arr) |
lindzey/pelican-plugins | render_math/math.py | Python | agpl-3.0 | 14,090 | 0.003123 | # -*- coding: utf-8 -*-
"""
Math Render Plugin for Pelican
==============================
This plugin allows your site to render Math. It uses
the MathJax JavaScript engine.
For markdown, the plugin works by creating a Markdown
extension which is used during the markdown compilation
stage. Math therefore gets treated like a "first class
citizen" in Pelican
For reStructuredText, the plugin instructs the rst engine
to output Mathjax for all math.
The mathjax script is by default automatically inserted
into the HTML.
Typogrify Compatibility
-----------------------
This plugin now plays nicely with Typogrify, but it
requires Typogrify version 2.07 or above.
User Settings
-------------
Users are also able to pass a dictionary of settings
in the settings file which will control how the MathJax
library renders things. This could be very useful for
template builders that want to adjust the look and feel of
the math. See README for more details.
"""
import os
import sys
from pelican import signals, generators
try:
from bs4 import BeautifulSoup
except ImportError as e:
BeautifulSoup = None
try:
from . pelican_mathjax_markdown_extension import PelicanMathJaxExtension
except ImportError as e:
PelicanMathJaxExtension = None
def process_settings(pelicanobj):
"""Sets user specified MathJax settings (see README for more details)"""
mathjax_settings = {}
# NOTE TO FUTURE DEVELOPERS: Look at the README and what is happening in
# this function if any additional changes to the mathjax settings need to
# be incorporated. Also, please inline comment what the variables
# will be used for
# Default settings
mathjax_settings['auto_insert'] = True # if set to true, it will insert mathjax script automatically into content without needing to alter the template.
mathjax_settings['align'] = 'center' # controls alignment of of displayed equations (values can be: left, right, center)
mathjax_settings['indent'] = '0em' # if above is not set to 'center', then this setting acts as an indent
mathjax_settings['show_menu'] = 'true' # controls whether to attach mathjax contextual menu
mathjax_settings['process_escapes'] = 'true' # controls whether escapes are processed
mathjax_settings['latex_preview'] = 'TeX' # controls what user sees while waiting for LaTex to render
mathjax_settings['color'] = 'inherit' # controls color math is rendered in
mathjax_settings['linebreak_automatic'] = 'false' # Set to false by default for performance reasons (see http://docs.mathjax.org/en/latest/output.html#automatic-line-breaking)
mathjax_settings['tex_extensions'] = '' # latex extensions that can be embedded inside mathjax (see http://docs.mathjax.org/en/latest/tex.html#tex-and-latex-extensions)
mathjax_settings['responsive'] = 'false' # Tries to make displayed math responsive
mathjax_settings['responsive_break'] = '768' # The break point at which it math is responsively aligned (in pixels)
mathjax_settings['mathjax_font'] = 'default' # forces mathjax to use the specified font.
mathjax_settings['process_summary'] = BeautifulSoup is not None # will fix up summaries if math is cut off. Requires beautiful soup
mathjax_settings['force_tls'] = 'false' # will force mathjax to be served by https - if set as False, it will only use https if site is served using https
mathjax_settings['message_style'] = 'normal' # This value controls the verbosity of the messages in the lower left-hand corner. Set it to "none" to eliminate all messages
# Source for MathJax
mathjax_settings['source'] = "'//cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'"
# Get the user specified settings
try:
settings = pelicanobj.settings['MATH_JAX']
except:
settings = None
# If no settings have been specified, then return the defaults
if not isinstance(settings, dict):
return mathjax_settings
# The following mathjax settings can be set via the settings dictionary
for key, value in ((key, settings[key]) for key in settings):
# Iterate over dictionary in a way that is compatible with both version 2
# and 3 of python
if key == 'align':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
if value == 'left' or value == 'right' or value == 'center':
mathjax_settings[key] = value
else:
mathjax_settings[key] = 'center'
if key == 'indent':
mathjax_settings[key] = value
if key == 'show_menu' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'message_style':
mathjax_settings[key] = value if value is not None else 'none'
if key == 'auto_insert' and isinstance(value, bool):
mathjax_settings[key] = value
if key == 'process_escapes' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'latex_preview':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'color':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
mathjax_settings[key] = value
if key == 'linebreak_automatic' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
        if key == 'process_summary' and isinstance(value, bool):
if value and BeautifulSoup is None:
print("BeautifulSoup4 is needed for summaries to be processed by render_math\nPlease install it")
value = False
            mathjax_settings[key] = value
if key == 'responsive' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'force_tls' and isinstance(value, bool):
mathjax_settings[key] = 'true' if value else 'false'
if key == 'responsive_break' and isinstance(value, int):
mathjax_settings[key] = str(value)
if key == 'tex_extensions' and isinstance(value, list):
# filter string values, then add '' to them
try:
value = filter(lambda string: isinstance(string, basestring), value)
except NameError:
value = filter(lambda string: isinstance(string, str), value)
value = map(lambda string: "'%s'" % string, value)
mathjax_settings[key] = ',' + ','.join(value)
if key == 'mathjax_font':
try:
typeVal = isinstance(value, basestring)
except NameError:
typeVal = isinstance(value, str)
if not typeVal:
continue
value = value.lower()
if value == 'sanserif':
value = 'SansSerif'
elif value == 'fraktur':
value = 'Fraktur'
elif value == 'typewriter':
value = 'Typewriter'
else:
value = 'default'
mathjax_settings[key] = value
return mathjax_settings
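# An example of the user-side dictionary this function consumes, as it might
# appear in a pelicanconf.py (illustrative values only):
#
#     MATH_JAX = {
#         'align': 'left',
#         'color': 'blue',
#         'linebreak_automatic': True,
#         'tex_extensions': ['mhchem.js', 'AMScd.js'],
#     }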
def process_summary(article):
"""Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered"""
summary = article._get_summary()
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_ |
worldforge/cyphesis | data/rulesets/basic/scripts/mind/goals/common/common.py | Python | gpl-2.0 | 3,486 | 0.002008 | # This file is distributed under the terms of the GNU General Public license.
# Copyright (C) 1999 Aloril (See the file COPYING for details).
import time
from mind.Goal import Goal
# goals for minds
def false(_): return False
def true(_): return True
class Delayed(Goal):
"""Will delay execution of sub goals until the specified time."""
def __init__(self, time: float, sub_goals: list, desc="A delayed goal."):
Goal.__init__(self, desc=desc, fulfilled=self.is_right_time, sub_goals=sub_goals)
self.time = time
def is_right_time(self, me):
# Return "false" when the time is right
is_right = time.time() < self.time
return is_right
class OneShot(Goal):
"""Will remove itself after the first successful execution of its subgoals."""
def __init__(self, sub_goals, desc="Executed once."):
Goal.__init__(self, desc=desc, sub_goals=sub_goals)
def check_goal_recursively(self, me, depth, debug_info):
res, debug_info = super().check_goal_recursively(me, depth, debug_info)
if res:
self.irrelevant = True
return res, debug_info
class DelayedOneShot(Goal):
"""Combines delayed execution with one shot. Useful when you want to perform one action once after a certain time."""
    def __init__(self, sub_goals, desc="Executed once after a delay"):
        Goal.__init__(self, desc=desc, sub_goals=[OneShot(sub_goals=[Delayed(time=time.time() + 1, sub_goals=sub_goals)])])
class Condition(Goal):
"""
A conditional goal which first executes a function, and then sets the subgoals to one of two possibilities.
If the condition function returns None then none of the subgoals will be executed.
"""
def __init__(self, condition_fn, goals_true, goals_false, desc="condition"):
Goal.__init__(self, desc=desc, fulfilled=self.assess_condition)
self.condition_fn = condition_fn
self.goals_true = goals_true
self.goals_false = goals_false
def assess_condition(self, me):
result = self.condition_fn(me)
if result is None:
return True
if result:
self.sub_goals = self.goals_true
else:
self.sub_goals = self.goals_false
return False
class Sequence(Goal):
"""A goal which will check on all subgoals in order."""
def __init__(self, sub_goals, desc="Sequence of goals"):
Goal.__init__(self, desc=desc, sub_goals=sub_goals)
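# A small composition sketch (some_goal is a hypothetical sub-goal): run a
# goal once, five seconds from now, by nesting the helpers defined above.
#
#     goal = OneShot(sub_goals=[Delayed(time=time.time() + 5,
#                                       sub_goals=[some_goal])])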
def get_reach(me):
reach = 0
own_reach = me.entity.get_prop_float('reach')
if own_reach:
reach += own_reach
attached_current = me.get_attached_entity("hand_primary")
if attached_current:
attached_reach = attached_current.get_prop_float('reach')
if attached_reach:
reach += attached_reach
return reach
def get_focused_location(me, what):
thing = get_focused_thing(me, what)
if thing:
return thing.location
return None
def get_focused_thing(me, what):
focus_id = me.get_knowledge('focus', what)
if focus_id is None:
return None
thing = me.map.get(focus_id)
if thing is None:
me.remove_knowledge('focus', what)
return None
return thing
def get_task(me, task_name):
"""Gets the task by the name from the 'tasks' property, if it exists."""
tasks_prop = me.entity.get_prop_map('tasks')
if tasks_prop and task_name in tasks_prop:
return tasks_prop[task_name]
|
emulbreh/shrubbery | shrubbery/authentication/__init__.py | Python | mit | 172 | 0.011628 | from shrubbery.authentication.contexts import AuthenticationContext, ModelAuthenticationContext
from shrubbery.authentication.exceptions import AuthenticationError, Http403
friedue/AlleleSpecific | individualScripts/removeNegValuesMOD.py | Python | mit | 2,375 | 0.025684 | #!/usr/bin/python
import sys, getopt
def main(argv):
try:
opts, args = getopt.getopt(argv,"hi:o:",["help","mpileupfile=","jfile=","snpfile=","ofile="])
except getopt.GetoptError:
print 'removeNegValuesMOD.py -i <infile> -o <output_file>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'USAGE: removeNegValuesMOD.py -i <infile> -o <output_file>'
            sys.exit()
        elif opt in ("-i", "--infile"):
infile = open( arg, 'r')
elif opt in ("-o", "--ofile"):
outputfile = open( arg, "w")
for line in infile:
entry=line.rstrip().split("\t")
if(len(entry) == 9):
start = entry[3]
end = entry[4]
if(start[0] == "-" and end[0] == "-"):
#outputfile.write(line.replace(start,str(int(start[1:len(start)])+1)).replace(end,str(int(end[1:len(end)])+1)))
outputfile.write(entry[0] + "\t" + entry[1] + "\t" + entry[2] + "\t" + str(int(start[1:len(start)])+1) + "\t" + str(int(end[1:len(end)])+1) + "\t" + entry[5] + "\t" + entry[6] + "\t" + entry[7] + "\t" + entry[8] + "\n")
elif (start[0] == "-"):
#outputfile.write(line.replace(start,str(int(start[1:len(start)])+1)).replace(end,str(int(end)+1)))
outputfile.write(entry[0] + "\t" + entry[1] + "\t" + entry[2] + "\t" + str(int(start[1:len(start)])+1) + "\t" + str(int(end)+1) + "\t" + entry[5] + "\t" + entry[6] + "\t" + entry[7] + "\t" + entry[8] + "\n")
elif (end[0] == "-"):
#outputfile.write(line.replace(end,str(int(end[1:len(end)])+1)).replace(start,str(int(start)+1)))
outputfile.write(entry[0] + "\t" + entry[1] + "\t" + entry[2] + "\t" + str(int(start)+1) + "\t" + str(int(end[1:len(end)])+1) + "\t" + entry[5] + "\t" + entry[6] + "\t" + entry[7] + "\t" + entry[8] + "\n")
else:
#outputfile.write(line)
outputfile.write(entry[0] + "\t" + entry[1] + "\t" + entry[2] + "\t" + str(int(start)+1) + "\t" + str(int(end)+1) + "\t" + entry[5] + "\t" + entry[6] + "\t" + entry[7] + "\t" + entry[8] + "\n")
else:
print line
infile.close()
outputfile.close()
if __name__=='__main__':
main(sys.argv[1:])
# awk '{print $4,$5}' Drosophila_melanogaster.BDGP5.74.indv2.negRem.gtf | grep "-" | sort | uniq
#2048873 -2048887
#20801699 -20801709
|
willkg/redminelib | redminelib/tests/test716.py | Python | mit | 1,444 | 0 | #######################################################################
# This file is part of redminelib.
#
# Copyright (C) 2011 Will Kahn-Greene
#
# redminelib is distributed under the MIT license. See the file
# COPYING for distribution details.
#######################################################################
from redminelib.redmine import RedmineScraper
from redminelib.tests import get_testdata
import os
from nose.tools import eq_
def test_716():
rs = RedmineScraper("")
    data = open(os.path.join(get_testdata(), "716.html")).read()
issue = rs.parse_issue(data)
# extracted
eq_(issue["id"], "716")
eq_(issue["title"], u'Apache FCGI documentation In Manual')
eq_(issue["author"], u"Sam Kleinman")
eq_(issue["creation-date"], "12/20/2011 10:23 am")
eq_(issue["last-updated-date"], "12/22/2011 06:29 pm")
eq_(issue["description"], u'')
# details table
eq_(issue["priority"], "Normal")
eq_(issue["status"], "New")
eq_(issue["start-date"], "12/20/2011")
eq_(issue["due-date"], "")
eq_(issue["assigned-to"], "Sam Kleinman")
eq_(issue["progress"], "0%")
eq_(issue["category"], "Documentation")
eq_(issue["fixed-version"], "-")
# history
eq_(len(issue["history"]), 1)
hist1 = issue["history"][0]
eq_(hist1["date"], "12/22/2011 06:29 pm")
eq_(hist1["author"], "Blaise Alleyne")
props = hist1["properties"]
eq_(len(props), 0)
|
qilicun/python | python3/tutorials/filepath.py | Python | gpl-3.0 | 65 | 0.015385 | #!/usr/bin/env python3
from pathlib import Path
p = Path('.')
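# Typical next steps with pathlib (standard-library behaviour):
subdirs = [x for x in p.iterdir() if x.is_dir()]
py_files = sorted(p.glob('**/*.py'))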
|
adfernandes/intelhex | intelhex/compat.py | Python | bsd-3-clause | 5,035 | 0.002383 | # Copyright (c) 2011, Bernhard Leiner
# Copyright (c) 2013-2018 Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Compatibility functions for python 2 and 3.
@author Bernhard Leiner (bleiner AT gmail com)
@author Alexander Belchenko (alexander belchenko AT gmail com)
'''
__docformat__ = "javadoc"
import sys, array
if sys.version_info[0] >= 3:
# Python 3
Python = 3
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
def asstr(s):
if isinstance(s, str):
return s
return s.decode('latin1')
# for python >= 3.2 use 'tobytes', otherwise 'tostring'
array_tobytes = array.array.tobytes if sys.version_info[1] >= 2 else array.array.tostring
IntTypes = (int,)
StrType = str
UnicodeType = str
range_g = range # range generator
def range_l(*args): # range list
return list(range(*args))
def dict_keys(dikt): # dict keys list
return list(dikt.keys())
def dict_keys_g(dikt): # dict keys generator
return dikt.keys()
def dict_items_g(dikt): # dict items generator
return dikt.items()
from io import StringIO, BytesIO
def get_binary_stdout():
return sys.stdout.buffer
def get_binary_stdin():
return sys.stdin.buffer
else:
# Python 2
Pytho | n = 2
asbytes = str
| asstr = str
array_tobytes = array.array.tostring
IntTypes = (int, long)
StrType = basestring
UnicodeType = unicode
#range_g = xrange # range generator
def range_g(*args):
# we want to use xrange here but on python 2 it does not work with long ints
try:
return xrange(*args)
except OverflowError:
start = 0
stop = 0
step = 1
n = len(args)
if n == 1:
stop = args[0]
elif n == 2:
start, stop = args
elif n == 3:
start, stop, step = args
else:
raise TypeError('wrong number of arguments in range_g call!')
if step == 0:
raise ValueError('step cannot be zero')
if step > 0:
def up(start, stop, step):
while start < stop:
yield start
start += step
return up(start, stop, step)
else:
def down(start, stop, step):
while start > stop:
yield start
start += step
return down(start, stop, step)
range_l = range # range list
def dict_keys(dikt): # dict keys list
return dikt.keys()
def dict_keys_g(dikt): # dict keys generator
return dikt.keys()
def dict_items_g(dikt): # dict items generator
return dikt.items()
from cStringIO import StringIO
BytesIO = StringIO
import os
def _force_stream_binary(stream):
"""Force binary mode for stream on Windows."""
if os.name == 'nt':
f_fileno = getattr(stream, 'fileno', None)
if f_fileno:
fileno = f_fileno()
if fileno >= 0:
import msvcrt
msvcrt.setmode(fileno, os.O_BINARY)
return stream
def get_binary_stdout():
return _force_stream_binary(sys.stdout)
def get_binary_stdin():
return _force_stream_binary(sys.stdin)
|
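A quick sanity sketch of the shims above; in particular range_g must survive bounds that overflow a C long, where plain xrange() on a 32-bit Python 2 raises OverflowError:

big = 2 ** 40
assert list(range_g(big, big + 3)) == [big, big + 1, big + 2]
assert asstr(asbytes('hello')) == 'hello'   # round-trips on both 2 and 3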
ilia-novikov/xcos-gen | hdl_block.py | Python | gpl-3.0 | 1,461 | 0.000684 | """
This file is part of xcos-gen.
xcos-gen is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
xcos-gen is distributed in the hope t | hat it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPO | SE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with xcos-gen. If not, see <http://www.gnu.org/licenses/>.
Author: Ilia Novikov <ilia.novikov@live.ru>
"""
from block import Block
class HdlBlock:
def __init__(self, block: Block, hdl_type: str):
self.block_type = hdl_type
self.block_id = block.block_id
self.gain = block.gain
self.inputs = block.inputs
self.outputs = block.outputs
self.in_wire = None
self.out_wire = None
def __str__(self):
if not self.gain:
return "{0}: {1}, {2} -> {3}".format(self.block_type, self.block_id, self.in_wire, self.out_wire)
else:
return "{0}: {1}, k = {2}, {3} -> {4}".format(
self.block_type,
self.block_id,
self.gain,
self.in_wire,
self.out_wire
)
|
bistromath/gr-smartnet | src/python/smartnet2decode.py | Python | gpl-3.0 | 13,591 | 0.033037 | #!/usr/bin/env python
"""
This program decodes the Motorola SmartNet II trunking protocol from the control channel
Tune it to the control channel center freq, and it'll spit out the decoded packets.
In what format? Who knows.
Based on your AIS decoding software, which is in turn based on the gr-pager code and the gr-air code.
"""
from gnuradio import gr, gru, blks2, optfir, digital
from gnuradio import audio
from gnuradio import eng_notation
from gnuradio import uhd
from fsk_demod import fsk_demod
from optparse import OptionParser
from gnuradio.eng_option import eng_option
from gnuradio import smartnet
import time
import gnuradio.gr.gr_threading as _threading
import csv
class top_block_runner(_threading.Thread):
def __init__(self, tb):
_threading.Thread.__init__(self)
self.setDaemon(1)
self.tb = tb
self.done = False
self.start()
def run(self):
self.tb.run()
self.done = True
class my_top_block(gr.top_block):
def __init__(self, options, queue):
gr.top_block.__init__(self)
if options.filename is not None:
self.fs = gr.file_source(gr.sizeof_gr_complex, options.filename)
self.rate = options.rate
else:
self.u = uhd.usrp_source(options.addr,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1)
if options.subdev is not None:
self.u.set_subdev_spec(options.subdev, 0)
self.u.set_samp_rate(options.rate)
self.rate = self.u.get_samp_rate()
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
self.centerfreq = options.centerfreq
print "Tuning to: %fMHz" % (self.centerfreq - options.error)
if not(self.tune(options.centerfreq - options.error)):
print "Failed to set initial frequency"
if options.gain is None: #set to halfway
g = self.u.get_gain_range()
options.gain = (g.start()+g.stop()) / 2.0
print "Setting gain to %i" % options.gain
self.u.set_gain(options.gain)
self.u.set_bandwidth(options.bandwidth)
print "Samples per second is %i" % self.rate
self._syms_per_sec = 3600;
options.samples_per_second = self.rate
options.syms_per_sec = self._syms_per_sec
options.gain_mu = 0.01
options.mu=0.5
options.omega_relative_limit = 0.3
options.syms_per_sec = self._syms_per_sec
options.offset = options.centerfreq - options.freq
print "Control channel offset: %f" % options.offset
self.demod = fsk_demod(options)
self.start_correlator = gr.correlate_access_code_tag_bb("10101100",
0,
"smartnet_preamble") #should mark start of packet
self.smartnet_deinterleave = smartnet.deinterleave()
self.smartnet_crc = smartnet.crc(queue)
if options.filename is None:
self.connect(self.u, self.demod)
else:
self.connect(self.fs, self.demod)
self.connect(self.demod, self.start_correlator, self.smartnet_deinterleave, self.smartnet_crc)
#hook up the audio patch
if options.audio:
self.audiorate = 48000
self.audiotaps = gr.firdes.low_pass(1, self.rate, 8000, 2000, gr.firdes.WIN_HANN)
self.prefilter_decim = int(self.rate / self.audiorate) #might have to use a rational resampler for audio
print "Prefilter decimation: %i" % self.prefilter_decim
self.audio_prefilter = gr.freq_xlating_fir_filter_ccf(self.prefilter_decim, #decimation
self.audiotaps, #taps
0, #freq offset
self.rate) #sampling rate
#on a trunked network where you know you will have good signal, a carrier power squelch works well. real FM receviers use a noise squelch, where
#the received audio is high-passed above the cutoff and then fed to a reverse squelch. If the power is then BELOW a threshold, open the squelch.
self.squelch = gr.pwr_squelch_cc(options.squelch, #squelch point
alpha = 0.1, #wat
ramp = 10, #wat
gate = False)
self.audiodemod = blks2.fm_demod_cf(self.rate/self.prefilter_decim, #rate
1, #audio decim | ation
4000, #deviation
3000, #audio passband
4000, #audio stopband
1, #gain
75e-6) #deemphasi | s constant
#the filtering removes FSK data woobling from the subaudible channel (might be able to combine w/lpf above)
self.audiofilttaps = gr.firdes.high_pass(1, self.audiorate, 300, 50, gr.firdes.WIN_HANN)
self.audiofilt = gr.fir_filter_fff(1, self.audiofilttaps)
self.audiogain = gr.multiply_const_ff(options.volume)
self.audiosink = audio.sink (self.audiorate, "")
# self.audiosink = gr.wavfile_sink("test.wav", 1, self.audiorate, 8)
self.mute()
if options.filename is None:
self.connect(self.u, self.audio_prefilter)
else:
self.connect(self.fs, self.audio_prefilter)
# self.connect(self.audio_prefilter, self.squelch, self.audiodemod, self.audiofilt, self.audiogain, self.audioresamp, self.audiosink)
self.connect(self.audio_prefilter, self.squelch, self.audiodemod, self.audiofilt, self.audiogain, self.audiosink)
###########SUBCHANNEL DECODING EXPERIMENT###########
#here we set up the low-pass filter for audio subchannel data decoding. gain of 10, decimation of 10.
# self.subchannel_decimation = 50
# self.subchannel_gain = 10
# self.subchannelfilttaps = gr.firdes.low_pass(self.subchannel_gain, self.audiorate, 200, 40, firdes.WIN_HANN)
# self.subchannelfilt = gr.fir_filter_fff(self.subchannel_decimation, self.subchannelfilttaps)
# self.subchannel_syms_per_sec = 150
# self.subchannel_samples_per_symbol = (self.audiorate / self.subchannel_decimation) / self.subchannel_syms_per_sec
# print "Subchannel samples per symbol: %f" % self.subchannel_samples_per_symbol
# self.subchannel_clockrec = gr.clock_recovery_mm_ff(self.subchannel_samples_per_symbol,
# 0.25*0.01*0.01,
# 0.5,
# 0.01,
# 0.3)
# self.subchannel_slicer = gr.binary_slicer_fb()
# self.subchannel_correlator = gr.correlate_access_code_bb("01000",0)
# self.subchannel_framer = smartnet.subchannel_framer()
# self.subchannel_sink = gr.null_sink(1); #just so it doesn't bitch until we do something with it
# self.connect(self.audiodemod, self.subchannelfilt, self.subchannel_clockrec, self.subchannel_slicer, self.subchannel_correlator, self.subchannel_framer, self.subchannel_sink)
    def tune(self, freq):
        # NOTE: set_center_freq's result is ignored and True is always
        # returned, so the "Failed to set initial frequency" check in
        # __init__ can never fire.
        result = self.u.set_center_freq(freq)
        return True
def tuneoffset(self, target_freq, rffreq):
#print "Setting offset; target freq is %f, Center freq is %f" % (target_freq, rffreq)
self.audio_prefilter.set_center_freq(rffreq-target_freq*1e6)
def setvolume(self, vol):
self.audiogain.set_k(vol)
def mute(self):
self.setvolume(0)
def unmute(self, volume):
self.setvolume(volume)
def getfreq(chanlist, cmd):
if chanlist is None:
if cmd < 0x2d0:
freq = float(cmd * 0.025 + 851.0125)
else:
freq = None
else:
if chanlist.get(str(cmd), None) is not None:
freq = float(chanlist[str(cmd)])
else:
freq = None
return freq
def parsefreq(s, chanlist):
retfreq = None
    [address, groupflag, command] = s.split(",")
    command = int(command)
    address = int(address) & 0xFFF0
    # XXX bool() of any non-empty string is always True, so this flag never
    # comes out False; compare against an explicit token instead (the same
    # applies in parse() below).
    groupflag = bool(groupflag)
if chanlist is None:
if command < 0x2d0:
retfreq = getfreq(chanlist, command)
else:
if chanlist.get(str(command), None) is not None: #if it falls into the channel somewhere
retfreq = getfreq(chanlist, command)
return [retfreq, address] # mask so the squelch opens up on the entire group
def parse(s, shorttglist, longtglist, chanlist, elimdupes):
#this is the main parser. it takes in commands in the form "address,command" (no quotes of course) and outputs text via print
#it is also responsible for using the talkgroup list, if any
[address, groupflag, command] = s.split(",")
command = int(command)
address = int(address)
lookupaddr = address & 0xFFF0
groupflag = bool(groupflag)
# print "Command is",command
if longtglist is not None and longtglist.get(str(lookupaddr), None) is not None:
longname = longtglist[str(lookupaddr)] #the mask is to screen out e |
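For reference, the fallback mapping in getfreq() (used when no chanlist is supplied) places channel commands on 25 kHz steps starting at 851.0125 MHz; a worked example:

# freq = cmd * 0.025 + 851.0125, valid for cmd < 0x2d0
cmd_num = 0x1B8                          # channel command 440
freq = cmd_num * 0.025 + 851.0125        # 440 * 0.025 MHz above the base
assert abs(freq - 862.0125) < 1e-9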
rituven/winston | core/Events.py | Python | apache-2.0 | 163 | 0.006135 | c | lass Events(object):
"""
Events Enum
"""
UI_BTN_PRESSED = 100
UI_BTN_RELEASED = 101
UI_BTN_CLICKED = 102
SET_UI_BTN_STATE = | 150
|
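On Python 3 (or with the enum34 backport) the same constants could use the standard enum module, which adds names and iteration for free; a sketch:

from enum import IntEnum

class EventsEnum(IntEnum):
    UI_BTN_PRESSED = 100
    UI_BTN_RELEASED = 101
    UI_BTN_CLICKED = 102
    SET_UI_BTN_STATE = 150

assert EventsEnum.UI_BTN_CLICKED == 102
assert EventsEnum(150) is EventsEnum.SET_UI_BTN_STATE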
natea/Miro-Community | localtv/search/tests.py | Python | agpl-3.0 | 10,029 | 0.001197 | from django.contrib.auth.models import User
from localtv.tests import BaseTestCase
from localtv import models
from localtv.playlists.models import Playlist
from localtv import search
class SearchTokenizeTestCase(BaseTestCase):
"""
Tests for the search query tokenizer.
"""
def assertTokenizes(self, query, result):
self.assertEquals(tuple(search.tokenize(query)),
tuple(result))
def test_split(self):
"""
Space-separated tokens should be split apart.
"""
self.assertTokenizes('foo bar baz', ('foo', 'bar', 'baz'))
def test_quotes(self):
"""
Quoted string should be kept together.
"""
self.assertTokenizes('"foo bar" \'baz bum\'', ('foo bar', 'baz bum'))
def test_negative(self):
"""
Items prefixed with - should keep that prefix, even with quotes.
"""
self.assertTokenizes('-foo -"bar baz"', ('-foo', '-bar baz'))
def test_or_grouping(self):
"""
{}s should group their keywords together.
"""
self.assertTokenizes('{foo {bar baz} bum}', (['foo',
['bar', 'baz'],
'bum'],))
def test_colon(self):
"""
:s should remain part of their word.
"""
self.assertTokenizes('foo:bar', ('foo:bar',))
def test_open_grouping(self):
"""
An open grouping at the end should return all its items.
"""
self.assertTokenizes('{foo bar', (['foo', 'bar'],))
def test_open_quote(self):
"""
An open quote should be stripped.
"""
self.assertTokenizes('"foo', ('foo',))
self.assertTokenizes("'foo", ('foo',))
def test_unicode(self):
"""
Unicode should be handled as regular characters.
"""
self.assertTokenizes(u'espa\xf1a', (u'espa\xf1a',))
def test_unicode_not_latin_1(self):
"""
Non latin-1 characters should be included.
"""
self.assertTokenizes(u'foo\u1234bar', (u'foo\u1234bar',))
def test_blank(self):
"""
A blank query should tokenize to a blank list.
"""
self.assertTokenizes('', ())
class AutoQueryTestCase(BaseTestCase):
fixtures = BaseTestCase.fixtures + ['categories', 'feeds', 'savedsearches',
'videos']
def _rebuild_index(self):
"""
Rebuilds the search index.
"""
from haystack import site
index = site.get_index(models.Video)
index.reindex()
def search(self, query):
return [result.object for result in search.auto_query(query)]
def test_search(self):
"""
The basic query should return videos which contain the search term.
"""
self._rebuild_index()
results = search.auto_query('blender')
self.assertTrue(results)
for result in results:
self.assertTrue('blender' in result.text.lower(), result.text)
def test_search_phrase(self):
"""
Phrases in quotes should be searched for as a phrase.
"""
self._rebuild_index()
results = search.auto_query('"empty mapping"')
self.assertTrue(results)
for result in results:
self.assertTrue('empty mapping' in result.text.lower())
def test_search_includes_tags(self):
"""
Search should search the tags for videos.
"""
video = models.Video.objects.get(pk=20)
video.tags = 'tag1 tag2'
video.save()
self._rebuild_index()
self.assertEquals(self.search('tag1'), [video])
self.assertEquals(self.search('tag2'), [video])
self.assertEquals(self.search('tag2 tag1'), [video])
self.assertEquals(self.search('tag:tag1'), [video])
self.assertEquals(self.search('tag:tag2'), [video])
self.assertEquals(self.search('tag:tag2 tag:tag1'), [video])
def test_search_includes_categories(self):
"""
Search should search the category for videos.
"""
video = models.Video.objects.get(pk=20)
video.categories = [1, 2] # Miro, Linux
video.save()
self._rebuild_index()
self.assertEquals(self.search('Miro'), [video])
self.assertEquals(self.search('Linux'), [video])
self.assertEquals(self.search('Miro Linux'), [video])
self.assertEquals(self.search('category:Miro'), [video]) # name
self.assertEquals(self.search('category:linux'), [video]) # slug
self.assertEquals(self.search('category:1 category:2'), [video]) # pk
def test_search_includes_user(self):
| """
Search should search the user who submitted videos.
"""
video = models.Video.objects.get(pk=20)
video.user = User.objects.get(username='superuser')
video.user.username = 'SuperUser'
video.user.first_name = 'firstname'
video.user.last_name = 'lastname'
video.user.save()
video.save()
video2 = model | s.Video.objects.get(pk=47)
video2.authors = [video.user]
video2.save()
self._rebuild_index()
self.assertEquals(self.search('superuser'), [video2, video])
self.assertEquals(self.search('firstname'), [video2, video])
self.assertEquals(self.search('lastname'), [video2, video])
self.assertEquals(self.search('user:SuperUser'),
[video2, video]) # name
self.assertEquals(self.search('user:superuser'),
[video2, video]) # case-insenstive name
self.assertEquals(self.search('user:%i' % video.user.pk),
[video2, video]) # pk
def test_search_excludes_user(self):
"""
-user:name should exclude that user's videos from the search results.
"""
video = models.Video.objects.get(pk=20)
video.user = User.objects.get(username='superuser')
video.user.username = 'SuperUser'
video.user.first_name = 'firstname'
video.user.last_name = 'lastname'
video.user.save()
video.save()
video2 = models.Video.objects.get(pk=47)
video2.user = video.user
video2.authors = [video.user]
video2.save()
self._rebuild_index()
excluded = self.search('-user:superuser')
for e in excluded:
# should not include the superuser videos
self.assertNotEquals(e, video)
self.assertNotEquals(e, video2)
def test_search_includes_service_user(self):
"""
Search should search the video service user for videos.
"""
video = models.Video.objects.get(pk=20)
video.video_service_user = 'Video_service_user'
video.save()
self._rebuild_index()
self.assertEquals(self.search('video_service_user'), [video])
def test_search_includes_feed_name(self):
"""
Search should search the feed name for videos.
"""
video = models.Video.objects.get(pk=20)
# feed is miropcf
self._rebuild_index()
self.assertEquals(self.search('miropcf'), [video])
self.assertEquals(self.search('feed:miropcf'), [video]) # name
self.assertEquals(self.search('feed:%i' % video.feed.pk), [video]) # pk
def test_search_exclude_terms(self):
"""
Search should exclude terms that start with - (hyphen).
"""
self._rebuild_index()
results = search.auto_query('-blender')
self.assertTrue(results)
for result in results:
self.assertFalse('blender' in result.text.lower())
def test_search_unicode(self):
"""
Search should handle Unicode strings.
"""
self._rebuild_index()
self.assertEquals(self.search(u'espa\xf1a'), [])
def test_search_includes_playlist(self):
"""
Search should include the playlists a video is a part of.
"""
user = User.objects.get(username='user')
|
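The tokenizer tests above pin down quoting, negation and {} grouping; a toy tokenizer covering only the quoted-phrase and -prefix cases (not the real localtv.search.tokenize, with no {} grouping or open-quote recovery):

import re

def toy_tokenize(query):
    pattern = re.compile(r'(-?)(?:"([^"]*)"|\'([^\']*)\'|(\S+))')
    for neg, dq, sq, bare in pattern.findall(query):
        yield neg + (dq or sq or bare)

assert tuple(toy_tokenize('foo bar baz')) == ('foo', 'bar', 'baz')
assert tuple(toy_tokenize('-foo -"bar baz"')) == ('-foo', '-bar baz')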
emmanuj/dials_shortest_path | graph.py | Python | mit | 1,382 | 0.015195 |
#Graph data structure for input graph
| from node import Node
class Graph:
def __init__(self, n):
self.numnodes = n
self.vertices = [] #container for nodes
self.edges = []
for i in range(0,n):
self.vertices.append(Node(i))
self.edges.append([])
self.max_edge_l = 0
self.source = None
def size(self):
return self.numnodes
#add edge to the graph
def add_edge(self, head, tail, edge_length):
| self.edges[head].append((tail, edge_length)) # edges are a tuple of tail and edge length
if edge_length > self.max_edge_l:
self.max_edge_l= edge_length
def neighbors(self, i):
return self.edges[i]
    def vertices(self):
        # NOTE: shadowed by the instance attribute of the same name assigned
        # in __init__, so this accessor is unreachable on instances.
        return self.vertices
def max_edge_length(self):
return self.max_edge_l
def update_distance(self, id, dist):
self.vertices[id].distance = dist
def get(self, i):
return self.vertices[i]
def update_parent(self,i, p):
self.vertices[i].parent = self.vertices[p]
def set_source(self, i):
self.vertices[i].distance = 0
self.source = self.vertices[i]
def get_source(self):
return self.source.id
#return the length of arc (a,b)
def find_arc_length(self, a,b):
for e in self.edges[a]:
if e[0] == b:
return e[1]
|
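Minimal usage sketch for the adjacency-list Graph above (assumes the node.Node class from the same repo provides the id/distance/parent attributes used here):

g = Graph(3)
g.add_edge(0, 1, 4)
g.add_edge(1, 2, 7)
g.set_source(0)
assert g.neighbors(1) == [(2, 7)]       # edges stored as (tail, length)
assert g.find_arc_length(0, 1) == 4
assert g.max_edge_length() == 7         # tracked incrementally in add_edge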
Paricitoi/python_4_eng | python_week1/v2/week1_ex8v2.py | Python | gpl-3.0 | 321 | 0.034268 | #!/u | sr/bin/env python
from ciscoconfparse import CiscoConfParse
def main():
cisco_cfg = CiscoConfParse("cisco_ipsec.txt")
cr_map_list = cisco_cfg.find_objects(r"^crypto map CRYPTO")
for item in cr_map_list:
print item.text
for child in item.children:
pr | int child.text
if __name__ == "__main__":
main()
|
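For context, find_objects(r"^crypto map CRYPTO") returns the matching parent lines and .children walks their indented block, so with a hypothetical cisco_ipsec.txt excerpt such as:

#   crypto map CRYPTO 10 ipsec-isakmp
#    set peer 10.0.0.1
#    set transform-set MYSET
#
# the loop above would print the parent line followed by both children,
# preserving the original indentation.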
openstack/designate | designate/quota/__init__.py | Python | apache-2.0 | 966 | 0 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in | compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli | ed. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from designate.quota import base
LOG = logging.getLogger(__name__)
def get_quota():
quota_driver = cfg.CONF.quota_driver
LOG.debug("Loading quota driver: %s", quota_driver)
cls = base.Quota.get_driver(quota_driver)
return cls()
|
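get_driver() here is the usual name-to-class plugin lookup; a self-contained sketch of that registry pattern (not designate's actual base.Quota implementation):

class ToyQuotaBase(object):
    _drivers = {}

    @classmethod
    def register(cls, name):
        def decorator(driver_cls):
            cls._drivers[name] = driver_cls
            return driver_cls
        return decorator

    @classmethod
    def get_driver(cls, name):
        return cls._drivers[name]

@ToyQuotaBase.register('noop')
class NoopQuota(ToyQuotaBase):
    pass

assert ToyQuotaBase.get_driver('noop') is NoopQuota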
karantan/singer-getting-started | src/main.py | Python | mit | 617 | 0 | from datetime import datetime
from datetime import timezone
imp | ort singer
import urllib.request
def my_ip():
now = datetime.now(timezone.utc).isoformat()
schema = {
'properties': {
'ip': {'type': 'string'},
'timestamp': {'type': 'string', 'format': 'date-time'},
},
}
with urllib.request.urlopen('http://icanhazip.com') as response:
ip = response.read().dec | ode('utf-8').strip()
singer.write_schema('my_ip', schema, 'timestamp')
singer.write_records('my_ip', [{'timestamp': now, 'ip': ip}])
if __name__ == "__main__":
my_ip()
|
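write_schema and write_records emit Singer-spec JSON messages on stdout, one per line; running the tap yields output along these lines (values illustrative):

# {"type": "SCHEMA", "stream": "my_ip", "schema": {...}, "key_properties": ["timestamp"]}
# {"type": "RECORD", "stream": "my_ip", "record": {"timestamp": "...", "ip": "..."}}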
Oizopower/Whitecoin-ABE | Abe/DataStore.py | Python | agpl-3.0 | 122,759 | 0.001662 | # Copyright(C) 2011,2012,2013,2014 by Abe developers.
# DataStore.py: back end database access for Abe.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Publi | c License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
# This module combines three functions that might be better split up:
# 1. Abe's schema
# 2. Abstraction over the schema for importing blocks, etc.
# 3. Code to load data by scanning blockfiles or using JSON-RPC.
import os
import re
import errno
import logging
import SqlAbstraction
import Chain
# bitcointools -- modified deserialize.py to return raw transaction
import BCDataStream
import deserialize
import util
import base58
SCHEMA_TYPE = "Abe"
SCHEMA_VERSION = SCHEMA_TYPE + "39"
CONFIG_DEFAULTS = {
"dbtype": None,
"connect_args": None,
"binary_type": None,
"int_type": None,
"upgrade": None,
"rescan": None,
"commit_bytes": None,
"log_sql": None,
"log_rpc": None,
"datadir": None,
"ignore_bit8_chains": None,
"use_firstbits": False,
"keep_scriptsig": True,
"import_tx": [],
"default_loader": "default",
}
WORK_BITS = 304 # XXX more than necessary.
CHAIN_CONFIG = [
{"chain":"Whitecoin"},
#{"chain":"",
# "code3":"", "address_version":"\x", "magic":""},
]
NULL_PUBKEY_HASH = "\0" * Chain.PUBKEY_HASH_LENGTH
NULL_PUBKEY_ID = 0
PUBKEY_ID_NETWORK_FEE = NULL_PUBKEY_ID
# Size of the script and pubkey columns in bytes.
MAX_SCRIPT = 1000000
MAX_PUBKEY = 65
NO_CLOB = 'BUG_NO_CLOB'
# XXX This belongs in another module.
class InvalidBlock(Exception):
pass
class MerkleRootMismatch(InvalidBlock):
def __init__(ex, block_hash, tx_hashes):
ex.block_hash = block_hash
ex.tx_hashes = tx_hashes
def __str__(ex):
return 'Block header Merkle root does not match its transactions. ' \
'block hash=%s' % (ex.block_hash[::-1].encode('hex'),)
class MalformedHash(ValueError):
pass
class MalformedAddress(ValueError):
pass
class DataStore(object):
"""
Bitcoin data storage class based on DB-API 2 and standard SQL with
workarounds to support SQLite3, PostgreSQL/psycopg2, MySQL,
Oracle, ODBC, and IBM DB2.
"""
def __init__(store, args):
"""
Open and store a connection to the SQL database.
args.dbtype should name a DB-API 2 driver module, e.g.,
"sqlite3".
args.connect_args should be an argument to the module's
connect() method, or None for no argument, or a list of
arguments, or a dictionary of named arguments.
args.datadir names Bitcoin data directories containing
blk0001.dat to scan for new blocks.
"""
if args.datadir is None:
args.datadir = util.determine_db_dir()
if isinstance(args.datadir, str):
args.datadir = [args.datadir]
store.args = args
store.log = logging.getLogger(__name__)
store.rpclog = logging.getLogger(__name__ + ".rpc")
if not args.log_rpc:
store.rpclog.setLevel(logging.ERROR)
if args.dbtype is None:
            store.log.warn("dbtype not configured, see abe.conf for examples")
store.dbmodule = None
store.config = CONFIG_DEFAULTS.copy()
store.datadirs = []
store.use_firstbits = CONFIG_DEFAULTS['use_firstbits']
return
store.dbmodule = __import__(args.dbtype)
sql_args = lambda: 1
sql_args.module = store.dbmodule
sql_args.connect_args = args.connect_args
sql_args.binary_type = args.binary_type
sql_args.int_type = args.int_type
sql_args.log_sql = args.log_sql
sql_args.prefix = "abe_"
sql_args.config = {}
store.sql_args = sql_args
store.set_db(None)
store.init_sql()
store._blocks = {}
# Read the CONFIG and CONFIGVAR tables if present.
store.config = store._read_config()
if store.config is None:
store.keep_scriptsig = args.keep_scriptsig
elif 'keep_scriptsig' in store.config:
store.keep_scriptsig = store.config.get('keep_scriptsig') == "true"
else:
store.keep_scriptsig = CONFIG_DEFAULTS['keep_scriptsig']
store.refresh_ddl()
if store.config is None:
store.initialize()
else:
store.init_sql()
if store.config['schema_version'] == SCHEMA_VERSION:
pass
elif args.upgrade:
import upgrade
upgrade.upgrade_schema(store)
else:
raise Exception(
"Database schema version (%s) does not match software"
" (%s). Please run with --upgrade to convert database."
% (store.config['schema_version'], SCHEMA_VERSION))
store._sql.auto_reconnect = True
if args.rescan:
store.sql("UPDATE datadir SET blkfile_number=1, blkfile_offset=0")
store._init_datadirs()
store.init_chains()
store.commit_bytes = args.commit_bytes
if store.commit_bytes is None:
store.commit_bytes = 0 # Commit whenever possible.
else:
store.commit_bytes = int(store.commit_bytes)
store.bytes_since_commit = 0
store.use_firstbits = (store.config['use_firstbits'] == "true")
for hex_tx in args.import_tx:
chain_name = None
if isinstance(hex_tx, dict):
chain_name = hex_tx.get("chain")
hex_tx = hex_tx.get("tx")
store.maybe_import_binary_tx(chain_name, str(hex_tx).decode('hex'))
store.default_loader = args.default_loader
store.commit()
def set_db(store, db):
store._sql = db
def get_db(store):
return store._sql
def connect(store):
return store._sql.connect()
def reconnect(store):
return store._sql.reconnect()
def close(store):
store._sql.close()
def commit(store):
store._sql.commit()
def rollback(store):
store._sql.rollback()
def sql(store, stmt, params=()):
store._sql.sql(stmt, params)
def ddl(store, stmt):
store._sql.ddl(stmt)
def selectrow(store, stmt, params=()):
return store._sql.selectrow(stmt, params)
def selectall(store, stmt, params=()):
return store._sql.selectall(stmt, params)
def rowcount(store):
return store._sql.rowcount()
def create_sequence(store, key):
store._sql.create_sequence(key)
def drop_sequence(store, key):
store._sql.drop_sequence(key)
def new_id(store, key):
return store._sql.new_id(key)
def init_sql(store):
sql_args = store.sql_args
if hasattr(store, 'config'):
for name in store.config.keys():
if name.startswith('sql.'):
sql_args.config[name[len('sql.'):]] = store.config[name]
if store._sql:
store._sql.close() # XXX Could just set_flavour.
store.set_db(SqlAbstraction.SqlAbstraction(sql_args))
store.init_binfuncs()
def init_binfuncs(store):
store.binin = store._sql.binin
store.binin_hex = store._sql.binin_hex
store.binin_int = store._sql.binin_int
store.binout = store._sql.binout
store.binout_hex = store._sql.binout_hex
store.binout_int = store._sql.binout_int
|
gengwg/leetcode | 211_add_and_search_word.py | Python | apache-2.0 | 2,673 | 0.001122 | # 211. Add and Search Word - Data structure design
#
# Design a data structure that supports the following two operations:
#
# void addWord(word)
# bool search(word)
#
# search(word) can search a literal word or a regular expression string containing
# only letters a-z or .. A . means it can represent any one letter.
#
# For example:
#
# addWord("bad")
# addWord("dad")
# addWord("mad")
# search("pad") -> false
# search("bad") -> true
# search(".ad") -> true
# search("b..") -> true
# Note:
# You may assume that all words are consist of lowercase letters a-z.
#
# click to show hint.
#
# You should be familiar with how a Trie works. If not, please work on this problem: Implement Trie (Prefix Tree) first.
#
# http://bookshadow.com/weblog/2015/05/16/leetcode-add-and-search-word-data-structure-design/
class TrieNode(object):
def __init__(self):
self.children = {}
#self.children = dict()
self.isWord = False
class WordDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: void
"""
node = self.root
for letter in word:
if node.children.get(letter) is None:
node.children[letter] = TrieNode() # add a new trie node
node = node.children.get(letter) # move node to next level
node.isWord = True # set the last node to true
def search(self, word):
"""
Returns if the word is in the data structure.
A word could contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
return self.find(self.root, word)
# | dfs
def find(self, node, word):
if word == '': # termination condition
return node.isWord
if word[0] == '.': # if . loop over all children
for x in node.children:
# if any of children returns true, return true
if x and | self.find(node.children[x], word[1:]):
return True
else: # normal find
child = node.children.get(word[0])
if child:
return self.find(child, word[1:])
return False
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
if __name__ == '__main__':
obj = WordDictionary()
obj.addWord("bad")
obj.addWord("dad")
print obj.search("pad")
print obj.search(".ad")
|
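A short trace of the wildcard search above (worst case the '.' branch fans out over every child, so d consecutive dots cost up to 26^d node visits):

wd = WordDictionary()
wd.addWord("bad")
assert wd.search("b..") is True    # 'b' edge, then '.' tries 'a', then 'd'
assert wd.search("....") is False  # no four-letter word stored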
muendelezaji/workload-automation | wlauto/workloads/camerarecord/__init__.py | Python | apache-2.0 | 1,671 | 0.001197 | # Copyright 2013-2015 ARM Limited
#
# Licensed under | the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Se | e the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto import UiAutomatorWorkload, Parameter
class Camerarecord(UiAutomatorWorkload):
name = 'camerarecord'
description = """
Uses in-built Android camera app to record the video for given interval
of time.
"""
package = 'com.google.android.gallery3d'
activity = 'com.android.camera.CameraActivity'
run_timeout = 0
parameters = [
Parameter('recording_time', kind=int, default=60,
description='The video recording time in seconds.'),
]
def __init__(self, device, **kwargs):
super(Camerarecord, self).__init__(device)
self.uiauto_params['recording_time'] = self.recording_time # pylint: disable=E1101
self.run_timeout = 3 * self.uiauto_params['recording_time']
def setup(self, context):
super(Camerarecord, self).setup(context)
self.device.execute('am start -n {}/{}'.format(self.package, self.activity))
def teardown(self, context):
self.device.execute('am force-stop {}'.format(self.package))
super(Camerarecord, self).teardown(context)
|
jdasinger/random | copy_rds.py | Python | gpl-3.0 | 1,743 | 0.004016 | #
# Code per http://blog.powerupcloud.com/2016/03/26/automating-rds-snapshots-with-aws-lambda/
#
import boto3
import botocore
import datetime
import re
SOURCE_REGION = 'us-east-1'
TARGET_REGION = 'us-west-1'
iam = boto3.client('iam')
instances = ['<instance name>'] # convert to command line argument
print('Loading function')
def get_timestamp_or_now(snap):
if 'SnapshotCreateTime' in snap:
return datetime.datetime.isoformat(snap['SnapshotCreateTime'])
else:
return datetime.datetime.isoformat(datetime.datetime.now())
def lambda_handler(event, context):
account_ids = []
try:
iam.get_user()
except Exception as e:
account_ids.append(re.search(r'(arn:aws:sts::)([0-9]+)', str(e)).groups()[1])
account = account_ids[0]
source = boto3.client('rds', region_name=SOURCE_REGION)
for instance in instances:
source_snaps = source.describe_db_snapshots(DBInstanceIdentifier=instance)['DBSnapshots']
source_snap = sorted(source_snaps, key=get_timestamp_or_now, reverse=True)[0]['DBSnapshotIdentifier']
source_snap_arn = 'arn:aws:rds:%s:%s:snapshot:%s' % (SOURCE_REGION, account, source_snap)
target_snap_id = (re.sub('rds:', '', source_snap))
print('Will Copy %s to %s' % (source_snap_arn, target_snap_id))
target = boto3.client('rds', region_name=TARGET_REGI | ON)
try:
response = target.copy_db_snapshot(
SourceDBSnapshotIdentifier=source_snap_arn,
TargetDBSnapshotId | entifier=target_snap_id,
CopyTags=True)
print(response)
except botocore.exceptions.ClientError as e:
raise Exception("Could not issue copy command: %s" % e)
|
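The account-ID-from-exception trick above works but is fragile; STS returns the account directly with the same credentials, so a simpler sketch would be:

import boto3

def current_account_id():
    # get_caller_identity needs no IAM permissions and always succeeds
    # for valid credentials.
    return boto3.client('sts').get_caller_identity()['Account']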
demharters/git_scripts | my_elbow_angle_tcr_imgt.py | Python | apache-2.0 | 7,744 | 0.02079 | '''
More information at: http://www.pymolwiki.org/index.php/elbow_angle
Calculate the elbow angle of an antibody Fab complex and optionally draw a
graphical representation of the vectors used to determine the angle.
NOTE: There is no automatic checking of the validity of limit_l and limit_h
values or of the assignment of light and heavy chain IDs. If these are entered
incorrectly or omitted, the reported angle will likely be incorrect.
As always with these things, your mileage may vary. Use at your own risk!
REQUIREMENTS
numpy, version 1.6
http://numpy.scipy.org
transformations.py, version 2012.01.01
by Christoph Gohlke
www.lfd.uci.edu/~gohlke/code
May also require an edit to transformations.py:
Changes `1e-8` to `1e-7` in lines 357 & 363 to avoid a numerical error.
com.py
by Jason Vertrees
http://www.pymolwiki.org/index.php/com
'''
__author__ = 'Jared Sampson'
__version__ = '0.1'
from pymol import cmd
import transformations
import com
import numpy
################################################################################
def calc_super_matrix(mobile,static):
'''
DESCRIPTION
Aligns two objects (or selections), returns the transformation matrix,
and resets the matrix of the mobile object.
Uses CEAlign PyMOL function for alignment.
ARGUMENTS
mobile = string: selection describing the mobile object whose rotation
matrix will be reported
static = string: selection describing the static object onto which the
mobile object will be aligned
REQUIRES: numpy
'''
cmd.cealign(static,mobile)
# cmd.super(mobile,static)
T = cmd.get_object_matrix(mobile)
R = numpy.identity(4)
k=0
for i in range (0,4):
for j in range (0,4):
R[i][j] = T[k]
k+=1
return R
################################################################################
#def elbow_angle(obj,light='L',heavy='H',limit_l=110,limit_h=113,draw=1):
# alpha = light, beta = heavy
# def elbow_angle(obj,light,heavy,limit_l=128,limit_h=127,draw=0):
def elbow_angle(obj,heavy,light,limit_h="1001E",limit_l=1001,draw=0):
"""
DESCRIPTION
Calculates the integer elbow angle of an antibody Fab complex and
optionally draws a graphical representation of the vectors used to
determine the angle.
ARGUMENTS
obj = string: object
light/heavy = strings: chain ID of light and heavy chains, respectively
limit_l/limit_h = integers: residue numbers of the last residue in the
light and heavy chain variable domains, respectively
draw = boolean: Choose whether or not to draw the angle visualization
REQUIRES: com.py, transformations.py, numpy (see above)
"""
# store current view
orig_view = cmd.get_view()
#limit_l = int(limit_l)
#limit_h = int(limit_h)
draw = int(draw)
# for temp object names
tmp_prefix = "tmp_elbow_"
prefix = tmp_prefix + obj + '_'
# names
vl = prefix + 'VL'
vh = prefix + 'VH'
cl = prefix + 'CL'
ch = prefix + 'CH'
# selections
vl_sel = 'polymer and %s and chain %s and resi 1-%i' % (obj, light, limit_l)
vh_sel = 'polymer and %s and chain %s and resi 1-%s & !resi 1001D & !resi 1001C & !resi 1001B & !resi 1001A & !resi 1001' % (obj, heavy, limit_h)
cl_sel = 'polymer and %s and chain %s and not resi 1-%i' % (obj, light, limit_l)
#ch_se | l = 'polymer and %s and chain %s and not resi 1-%i' % (obj, heavy, limit_h)
ch_sel = 'polymer and %s and chain %s and not resi 1-127 and not resi 1001D and not resi 1001C and not resi 1001B and not resi 1001A and not resi 1001' % (obj, heavy)
v_sel = '(('+vl_sel+') or ('+vh_sel+'))'
c_sel = '(('+cl_sel+') or ('+ch_sel+' | ))'
# create temp objects
cmd.create(vl,vl_sel)
cmd.create(vh,vh_sel)
cmd.create(cl,cl_sel)
cmd.create(ch,ch_sel)
# superimpose vl onto vh, calculate axis and angle
Rv = calc_super_matrix(vl,vh)
angle_v,direction_v,point_v = transformations.rotation_from_matrix(Rv)
# superimpose cl onto ch, calculate axis and angle
Rc = calc_super_matrix(cl,ch)
angle_c,direction_c,point_c = transformations.rotation_from_matrix(Rc)
# delete temporary objects
cmd.delete(vl)
cmd.delete(vh)
cmd.delete(cl)
cmd.delete(ch)
# if dot product is positive, angle is acute
if (numpy.dot(direction_v,direction_c)>0):
direction_c = direction_c * -1 # ensure angle is > 90 (need to standardize this)
# TODO: make both directions point away from the elbow axis.
elbow = int(numpy.degrees(numpy.arccos(numpy.dot(direction_v,direction_c))))
# while (elbow < 90):
# elbow = 180 - elbow # limit to physically reasonable range
# compare the direction_v and direction_c axes to the vector defined by
# the C-heavy atoms of limit_l and limit_h of the original fab
hinge_l_sel = "%s//%s/%s/CA" % (obj,light,limit_l)
hinge_h_sel = "%s//%s/%s/CA" % (obj,heavy,limit_h)
hinge_l = cmd.get_atom_coords(hinge_l_sel)
hinge_h = cmd.get_atom_coords(hinge_h_sel)
hinge_vec = numpy.array(hinge_h) - numpy.array(hinge_l)
test = numpy.dot(hinge_vec,numpy.cross(direction_v,direction_c))
if (test > 0):
elbow = 360 - elbow
#print " Elbow angle: %i degrees" % elbow
if (draw==1):
# there is probably a more elegant way to do this, but
# it works so I'm not going to mess with it for now
pre = obj+'_elbow_'
# draw hinge vector
cmd.pseudoatom(pre+"hinge_l",pos=hinge_l)
cmd.pseudoatom(pre+"hinge_h",pos=hinge_h)
cmd.distance(pre+"hinge_vec",pre+"hinge_l",pre+"hinge_h")
cmd.set("dash_gap",0)
# draw the variable domain axis
com_v = com.COM(v_sel)
start_v = [a - 10*b for a, b in zip(com_v, direction_v)]
end_v = [a + 10*b for a, b in zip(com_v, direction_v)]
cmd.pseudoatom(pre+"start_v",pos=start_v)
cmd.pseudoatom(pre+"end_v",pos=end_v)
cmd.distance(pre+"v_vec",pre+"start_v",pre+"end_v")
# draw the constant domain axis
com_c = com.COM(c_sel)
start_c = [a - 10*b for a, b in zip(com_c, direction_c)]
end_c = [a + 10*b for a, b in zip(com_c, direction_c)]
cmd.pseudoatom(pre+"start_c",pos=start_c)
cmd.pseudoatom(pre+"end_c",pos=end_c)
cmd.distance(pre+"c_vec",pre+"start_c",pre+"end_c")
# customize appearance
cmd.hide("labels",pre+"hinge_vec");cmd.hide("labels",pre+"v_vec");cmd.hide("labels",pre+"c_vec");
cmd.color("green",pre+"hinge_l");cmd.color("red",pre+"hinge_h");cmd.color("black",pre+"hinge_vec");
cmd.color("black",pre+"start_v");cmd.color("black",pre+"end_v");cmd.color("black",pre+"v_vec");
cmd.color("black",pre+"start_c");cmd.color("black",pre+"end_c");cmd.color("black",pre+"c_vec")
# draw spheres
cmd.show("spheres",pre+"hinge_l or "+pre+"hinge_h")
cmd.show("spheres",pre+"start_v or "+pre+"start_c")
cmd.show("spheres",pre+"end_v or "+pre+"end_c")
cmd.set("sphere_scale",2)
cmd.set("dash_gap",0,pre+"hinge_vec")
cmd.set("dash_width",5)
cmd.set("dash_radius",0.3)
# group drawing objects
cmd.group(pre,pre+"*")
# restore original view
cmd.set_view(orig_view)
return elbow
def setup_antibody():
    # NOTE: the original passed cmd.load()'s return value (not necessarily
    # the object name) and omitted the required heavy/light arguments, which
    # raised a TypeError. The chain IDs below are illustrative and depend on
    # the structure.
    cmd.load("1mhp_ch.pdb")
    my_elbow = elbow_angle("1mhp_ch", heavy="H", light="L")
    print(my_elbow)
    return 0
|
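Stripped to its core, the elbow angle is just the angle between the two screw axes, with one axis flipped whenever the dot product is positive so the obtuse angle is reported; in isolation (unit vectors assumed):

import numpy

direction_v = numpy.array([1.0, 0.0, 0.0])
direction_c = numpy.array([0.7071067811865476, 0.7071067811865476, 0.0])
if numpy.dot(direction_v, direction_c) > 0:
    direction_c = -direction_c              # force the obtuse angle
angle = numpy.degrees(numpy.arccos(numpy.dot(direction_v, direction_c)))
assert abs(angle - 135.0) < 1e-6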
pyql/PyQL | yaccer.py | Python | gpl-3.0 | 9,082 | 0 | # Define the grammar for the Pythonic Query Language.
# the smallest unit is a term
# between any two terms there can be:
# a COMMA - delimits fields and defines explicit query groups:
# a, b, c @ d>1, 2, 3
# a CONJUCTION - python in fields and acts as delimitor for conditions:
# a and b, c or d @ e and f=g
# a COMPARATOR - python in fields and distingishes a singleton from a group:
# a>b, c>d @ e and f=g
# another term - concatenate without query structural relevance
# a field is a tuple of terms
# a condition is a list of lists of tuples of terms,
# a query is: fields@conditions?arguments
from __future__ import print_function
import ply.yacc
import ply.lex
from lexer import tokens
import unittest
class Term(object):
def __init__(self, value, as_term, flavor):
self.value = value
self.as_term = as_term or value
self.flavor = flavor
self.mc = None # the metacode: added in query.py
def __repr__(self):
if self.as_term:
return '<T>value: `%s`, as: `%s`, flavor: `%s`</T>' % (
self.value, self.as_term, self.flavor)
class Query(object):
def __init__(self, fields, conditions=[], arguments=[]):
self.arguments = arguments
self.fields = []
# strip white space in fields
for field in fields:
while not field[0].value.strip():
field = field[1:]
while not field[-1].value.strip():
field = field[:-1]
self.fields.append(field)
self.conditions = conditions
def __repr__(self):
ret = "Fields:\n"
for i, f in enumerate(self.fields):
ret += ' f%d: %s\n' % (i+1, f)
ret += "Conditions:\n"
for i, c in enumerate(self.conditions):
ret += ' c%d: %s len %d\n' % (i+1, c, len(c))
if self.arguments:
ret += "with arguments: %s" % (self.arguments,)
return ret
# query internal to an Aggregator.
# no comma groups allowed here
# which allows us to find arguments
# build parser with start=aggregator_query
def p_aggregator_query(p):
"""aggregator_query : fields
| fields AT singleton_conditions
| fields AT singleton_conditions COMMA fields"""
if len(p) == 2:
p[0] = Query(fields=p[1])
elif len(p) == 4:
p[0] = Query(fields=p[1], conditions=p[3])
elif len(p) == 6:
p[0] = Query(fields=p[1], conditions=p[3], arguments=p[5])
def p_singleton_conditions(p):
"""singleton_conditions : singleton_condition
| singleton_conditions CONJUNCTION singleton_conditions"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]+[[[(Term(p[2], None, 'CONJUNCTION'),)]]]+p[3]
def p_singleton_condition(p):
"""singleton_condition : terms
| singleton_condition COMPARATOR singleton_condition"""
if len(p) == 2:
if type(p[1]) is tuple:
# the extra list here is to normalize terms for conditions
# group-bys have lists of len>1; singletons have lists of len=1
p[0] = [[p[1]]]
else:
p[0 | ] = [p | [1]]
else:
p[0] = p[1] + [[(Term(p[2], None, 'COMPARATOR'),)]] + p[3]
# top-level query
# build parser with start=query
def p_query(p):
"""query : base_query
| base_query QUESTION_MARK fields"""
p[0] = p[1]
if len(p) == 4:
p[0].arguments = p[3]
def p_base_query(p):
"""base_query : fields
| fields AT conditions"""
if len(p) == 2:
p[0] = Query(fields=p[1])
elif len(p) == 4:
p[0] = Query(fields=p[1], conditions=p[3])
def p_fields(p):
"""fields : field
| fields COMMA field"""
if len(p) == 4:
p[0] = p[1]+p[3:]
else:
p[0] = p[1:]
def p_field(p):
"""field : terms
| field_conjunction_field
| field_comparator_field"""
p[0] = p[1]
def p_field_conjunction_field(p):
"""field_conjunction_field : field CONJUNCTION field"""
p[0] = p[1] + (Term(p[2], None, 'CONJUNCTION'),) + p[3]
def p_field_comparator_field(p):
"""field_comparator_field : field COMPARATOR field"""
p[0] = p[1] + (Term(p[2], None, 'COMPARATOR'),) + p[3]
def p_conditions(p):
"""conditions : condition
| conditions CONJUNCTION conditions"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]+[[[(Term(p[2], None, 'CONJUNCTION'),)]]]+p[3]
def p_condition(p):
"""condition : terms
| comma_terms
| condition COMPARATOR condition"""
if len(p) == 2:
if type(p[1]) is tuple:
# the extra list here is to normalize terms for conditions
# group-bys have lists of len>1; singletons have lists of len=1
p[0] = [[p[1]]]
else:
p[0] = [p[1]]
else:
p[0] = p[1] + [[(Term(p[2], None, 'COMPARATOR'),)]] + p[3]
def p_python(p):
"""python : PYTHON
| python PYTHON """
p[0] = ''.join(p[1:])
def p_comma_terms(p):
"""comma_terms : terms COMMA terms
| comma_terms COMMA terms"""
if type(p[1]) is list:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1], p[3]]
def p_terms(p):
"""terms : term"""
p[0] = (p[1],)
def p_terms_term(p):
"""terms : terms term"""
p[0] = p[1]+(p[2],)
def p_term_as(p):
"""term : term_as_string
| term_as_python"""
p[0] = p[1]
def p_term_as_python(p):
"""term_as_python : term AS python"""
p[1].as_term = p[3]
p[0] = p[1]
def p_term_as_string(p):
"""term_as_string : term AS STRING"""
p[1].as_term = p[3][1:-1]
p[0] = p[1]
# each flavor of term has a mapping for python metacode generation in query.py
# Access database column by index (starting at $1).
def p_term_dollar(p):
"""term : DOLLAR"""
p[0] = Term(p[1], None, 'DOLLAR')
# Find the database column by parameter name.
def p_term_parameter(p):
"""term : PARAMETER"""
p[0] = Term(p[1], None, 'PARAMETER')
# Defined in aggregators.py. For example: Average, Sum, Unique.
def p_term_aggregtor(p):
"""term : AGGREGATOR"""
p[0] = Term(p[1], None, 'AGGREGATOR')
# A single or double quoted string.
def p_term_string(p):
"""term : STRING"""
p[0] = Term(p[1], None, 'STRING')
# Names that you do not want to bother quoting.
def p_term_db_string(p):
"""term : DB_STRING"""
p[0] = Term(p[1], None, 'DB_STRING')
# A function or tuple or just (parens) not recognized above.
def p_term_python_function(p):
"""term : PYTHON_FUNCTION"""
p[0] = Term(p[1], None, 'PYTHON_FUNCTION')
# Everything else.
def p_term_python(p):
"""term : python"""
p[0] = Term(p[1], None, 'PYTHON')
def p_error(p):
print("Syntax error in input: %s" % p)
raise Exception("Syntax error in input: %s" % p)
def build_parser(debug=False, start='query'):
return ply.yacc.yacc(debug=debug, start=start)
def test(text, in_aggregator=0):
import logging
logging.basicConfig(
level=logging.DEBUG,
filename="parselog.txt",
filemode="w",
format="%(filename)10s:%(lineno)4d:%(message)s")
log = logging.getLogger()
import lexer
lexer.test(text)
lexer.t_PARAMETER.__doc__ = r'team|hits|runs|errors|quarter\ scores|season'
lexer.t_DB_STRING.__doc__ = r'Cubs|Reds|Mets'
if in_aggregator:
agg_parser = build_parser(debug=True, start='aggregator_query')
else:
parser = build_parser(debug=True, start='query')
qob = ply.yacc.parse(text, debug=log)
return qob
class TestYaccer(unittest.TestCase):
def test_arguments(self):
pyql = """S(runs,N=2) as SumRuns@S(hits@team and season,
N=runs,format='%0.d')>10?output=scatter"""
qob = test(pyql)
self.assertEqual(qob.fields[0][0].flavor, 'AGGREGATOR')
def test_in_aggregator(self):
pyql = "points@team and 1,N=2,M=1"
qob = test(pyql, in_aggregator=1)
self.assert |
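To make the grammar comments concrete, this is how the Term/Query classes nest for a small query such as "a, b @ c > 1" (hand-built illustration of the shapes described above, not actual parser output):

q = Query(
    fields=[(Term('a', None, 'PYTHON'),), (Term('b', None, 'PYTHON'),)],
    conditions=[[[(Term('c', None, 'PYTHON'),)],
                 [(Term('>', None, 'COMPARATOR'),)],
                 [(Term('1', None, 'PYTHON'),)]]])
print(q)   # two fields f1/f2 and one condition list of length 3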
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/imperial/imp_warant_officer_ii_1st_class_33.py | Python | lgpl-3.0 | 1,458 | 0.028121 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_imperial_warrant_officer_ii_hard')
mobileTemplate.setLevel(33)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_npe_imperial_officer.iff')
mobileTemplate.setTemplates(templates)
weaponTempla | tes = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('imp_warra | nt_offi_1st_class_ii_33', mobileTemplate)
return |
yolanother/ubuntumobidev_ubiquity | tests/test_install_misc.py | Python | gpl-3.0 | 4,712 | 0 | #! /usr/bin/python3
import os
import shutil
import tempfile
import unittest
from ubiquity import install_misc
class InstallMiscTests(unittest.TestCase):
def setUp(self):
self.source = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.source)
self.target = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.target)
def source_path(self, relpath):
return os.path.join(self.source, relpath)
def target_path(self, relpath):
return os.path.join(self.target, relpath)
def try_remove_target(self, relpath):
# Imitate the copy_all context in which remove_target is normally
# run.
st = os.lstat(self.source_path(relpath))
install_misc.remove_target(self.source, self.target, relpath, st)
def test_remove_target_ignores_nonexistent(self):
with open(self.source_path("not-in-target"), "w"):
pass
self.try_remove_target("not-in-target")
def test_remove_target_for_directory_leaves_directory(self):
os.mkdir(self.source_path("dir"))
os.mkdir(self.target_path("dir"))
self.try_remove_target("dir")
self.assertTrue(os.path.isdir(self.target_path("dir")))
def test_remove_target_removes_non_directory(self):
os.mkdir(self.source_path("source-dir-target-file"))
with open(self.target_path("source-dir-target-file"), "w"):
pass
self.try_remove_target("source-dir-target-file")
self.assertFalse(os.path.exists(
self.target_path("source-dir-target-file")))
def test_remove_target_removes_empty_directory(self):
with open(self.source_path("source-file-target-empty-dir"), "w"):
pass
os.mkdir(self.target_path("source-file-target-empty-dir"))
self.try_remove_target("source-file-target-empty-dir")
self.assertFalse(os.path.exists(
self.target_path("source-file-target-empty-dir")))
def test_remove_target_moves_symlink_target_nonexistent(self):
os.makedirs(self.source_path("usr/local"))
os.symlink("share/man", self.source_path("usr/local/man"))
os.makedirs(self.target_path("usr/local/man"))
with open(self.target_path("usr/local/man/file"), "w"):
pass
self.try_remove_target("usr/local/man")
self.assertFalse(os.path.exists(self.target_path("usr/local/man")))
self.assertTrue(
os.path.isdir(self.target_path("usr/local/share/man")))
self.assertTrue(
os.path.isfile(self.target_path("usr/local/share/man/file")))
def test_remove_target_moves_absolute_symlink_target_nonexistent(self):
os.makedirs(self.source_path("usr/local"))
os.symlink("/usr/local/share/man", self.source_path("usr/local/man"))
os.makedirs(self.target_path("usr/local/man"))
with open(self.target_path("usr/local/man/file"), "w"):
pass
self.try_remove_target("usr/local/man")
self.assertFalse(os.path.exists(self.target_path("usr/local/man")))
self.assertTrue(
os.path.isdir(self.target_path("usr/local/share/man")))
self.assertTrue(
os.path.isfile(self.target_path("usr/local/share/man/file")))
def test_remove_target_moves_symlink_target_empty_directory(self):
os.makedirs(self.source_path("usr/local"))
os.symlink("share/man", self.source_path("usr/local/man"))
os.makedirs(self.target_path("usr/local/man"))
with open(self.target_path("usr/local/man/file"), "w"):
pass
os.makedirs(self.target_path("usr/local/share/man"))
self.try_remove_target("usr/local/man")
self.assertFalse(os.path.exists(self.target_path("usr/local/man")))
self.assertTrue(
os.path.isdir(self.target_path("usr/ | local/share/man")))
self.assertTrue(
os.path.isfile(self.target_path("usr/local/share/man/file")))
def test_remove_target_backs_up_non_empty_directory(self):
with open(self.source_path("source-file-target-non-empty-dir"), "w"):
| pass
os.mkdir(self.target_path("source-file-target-non-empty-dir"))
tp = self.target_path("source-file-target-non-empty-dir/file")
with open(tp, "w"):
pass
self.try_remove_target("source-file-target-non-empty-dir")
self.assertFalse(os.path.exists(
self.target_path("source-file-target-non-empty-dir")))
self.assertTrue(os.path.isdir(
self.target_path("source-file-target-non-empty-dir.bak")))
self.assertTrue(os.path.isfile(
self.target_path("source-file-target-non-empty-dir.bak/file")))
|
flarn2006/TPPStreamerBot | tppsb.py | Python | mit | 10,064 | 0.029213 | #!/usr/bin/python
import sys
import re
import thread
import urllib
from time import sleep
from datetime import datetime, timedelta
import requests
import praw
from prawcore.exceptions import *
import irc.bot
# Begin configurable parameters
identity = { # Make sure to set these if they aren't already!
'reddit_client_id': '',
'reddit_client_secret': '',
'reddit_username': '',
'reddit_password': '',
'twitch_client_id': '',
'twitch_irc_nick': '',
'twitch_irc_oauth': ''}
adminIrcNames = ['flarn2006', 'deadinsky']
updaterId = '102szrk71dw9r' # Main updater ID, for the current run or intermission.
updaterId2 = 'z0xcggm226qa' # Secondary updater ID, for mods who talk about other things a lot.
updaterIdTest = 'ty0ak5tjb4fq' # Test updater ID, used in test mode.
modList = ['twitchplayspokemon', 'aissurtievos'] # People whose messages are (almost) always worth posting to the updater.
modList2 = ['projectrevotpp', 'felkcraft'] # Only post these ones to the secondary updater.
testMode = 0 # 0) Normal mode
# 1) Run normally, but post to test updater
# 2) Test mode - read messages from console instead of Twitch chat
# Messages matching any of these regular expressions will be completely ignored.
msgRejectPatterns = [
re.compile('^!'), # Commands beginning with '!' (e.g. '!bet')
re.compile('^_mode '), # Streamer has used this before to manually activate anarchy/democracy.
re.compile('^(?:(?:[abxylrnews]|up|down|left|right|start|select|home|wait|anarchy|democracy|\\d{1,3},\\d{1,3}|move|switch|run|item[0-9]+(p[1-6](m[1-4])?))[0-9]*\\+?)+$', re.I), # Inputs - see http://regex101.com/ for help.
re.compile('https:\/\/(?:www\.)?twitch\.tv\/tankturntactics')] # DatSheffy no spam
# End configurable parameters
prevMsgs = {}
prevMsgTimes = {}
displayNames = {}
ircNames = {}
if len(sys.argv) >= 2:
if sys.argv[1] == '-t':
testMode = 2
elif sys.argv[1] == '-T':
testMode = 1
if testMode > 0:
updaterId = updaterIdTest
updaterId2 = updaterIdTest
modList.append(adminIrcNames[0]) # treat me as a streamer for easier testing
reddit = praw.Reddit(
user_agent = 'TPPStreamerBot, by /u/flarn2006',
client_id = identity['reddit_client_id'],
client_secret = identity['reddit_client_secret'],
username = identity['reddit_username'],
password = identity['reddit_password'])
mentionedUserRegex = re.compile('^@([^.,:\\s]+)')
ircNameRegex = re.compile('^[A-Za-z0-9_]+$')
def getDisplayName(username):
if username in displayNames:
return displayNames[username]
else:
headers = {'Client-ID':identity['twitch_client_id'], 'Accept':'application/vnd.twitchtv.v3+json'}
try:
req = requests.get('https://api.twitch.tv/kraken/users/'+urllib.quote(username), headers=headers)
dn = req.json()[u'display_name']
displayNames[username] = dn
ircNames[dn.lower()] = username
return dn
except Exception as ex:
print '\x1b[1;31m[!] Error getting display name for {}\x1b[m'.format(username)
return username
def getDisplayNameForUpdater(username):
dn = getDisplayName(username)
if dn.lower() != username.lower():
return u'{} ({})'.format(dn, username)
else:
return dn
def getIrcName(displayname):
if ircNameRegex.match(displayname):
# This is a valid Twitch/IRC name on its own. No need to look it up.
return displayname.lower()
elif displayname in ircNames:
# This is a recognized display name. Return its associated username.
return ircNames[displayname]
else:
# Neither a valid Twitch name, nor a recognized display name. Return an empty string to mean no match.
return ''
def isMsgImportant(msg):
for r in msgRejectPatterns:
if r.search(msg):
return False
return True
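# A quick illustration of the filter above (messages are hypothetical, not
# taken from the bot's logs): commands and button-mash inputs are rejected,
# ordinary chat passes through.
# >>> isMsgImportant('!bet 100 blue')
# False
# >>> isMsgImportant('up2left3a+b')
# False
# >>> isMsgImportant('Nice run so far!')
# True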
def escapeMarkdown(text):
result = ''
for c in text:
if c in '\\*[]`^':
result += '\\'
result += c
return result
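# For example (illustrative): escapeMarkdown(u'*[test]*') returns u'\\*\\[test\\]\\*'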
def postUpdate(updater, msg):
if updater == updaterId2:
print '\x1b[0;37m-> \x1b[1;30m{}\x1b[m'.format(msg.encode('utf-8'))
else:
print '\x1b[1;36m-> \x1b[0;36m{}\x1b[m'.format(msg.encode('utf-8'))
for i in xrange(10):
try:
reddit.request('POST', '/api/live/{}/update'.format(updater), {'api_type':'json', 'body':escapeMarkdown(msg)})
break
except RequestException as ex:
print '\x1b[1;31m[!] ({}/10) Error sending request:\x1b[0;31m {}\x1b[m'.format(i+1, ex)
sleep(1)
except Forbidden:
print "\x1b[1;31m[!] 403 FORBIDDEN: \x1b[0;31mDon't forget to accept the invitation!\x1b[m"
break
def findUsernameInMsg(msg):
match = mentionedUserRegex.match(msg)
if match:
return getIrcName(match.group(1).lower())
else:
return ''
def handleMsg(user, msg):
if isMsgImportant(msg):
# Determine which updater, if any, this message should be posted to.
upd = ''
if user in modList:
upd = updaterId
elif user in modList2:
upd = updaterId2
# Aissurtievos only wants messages beginning with a ` to be posted
if us | er == 'aissurtievos' and not msg.startswith('`'):
upd = ''
if upd != '':
# Message is from a monitored user.
# First, see if the message is a reply to another user, so we can pull their message.
mentionedUser = findUsernameInMsg(msg)
if menti | onedUser != '' and mentionedUser in prevMsgs and mentionedUser not in modList:
# We've got a match! But let's make sure the message was posted recently.
if datetime.now() - prevMsgTimes[mentionedUser] > timedelta(0, 300):
# Looks like it wasn't. Let's remove it from the list and forget about it.
mentionedUser = ''
else:
# Nope, no match. Either nobody was mentioned or we have no message stored from them.
mentionedUser = ''
if mentionedUser == '':
# Standard format update
postUpdate(upd, u'[Streamer] {}: {}'.format(getDisplayName(user), msg))
else:
# Update including message from other user
postUpdate(upd, u'[Streamer] {}: {}\n\n{}: {}'.format(getDisplayNameForUpdater(mentionedUser), prevMsgs[mentionedUser], getDisplayName(user), msg))
# Add the message to the previous messages list.
prevMsgs[user] = msg
prevMsgTimes[user] = datetime.now()
dn = getDisplayName(user)
prevMsgs[dn] = msg
prevMsgTimes[dn] = prevMsgTimes[user]
def handleWhisper(user, msg):
global updaterId
cmd = msg.split(u' ')
cmd[0] = cmd[0].lower()
if cmd[0] == 'lastmsg':
try:
cmd[1] = cmd[1].lower()
if cmd[1] in prevMsgs:
username = cmd[1]
elif getDisplayName(cmd[1]) in prevMsgs:
username = getDisplayName(cmd[1])
else:
return u"{} didn't say anything recently.".format(cmd[1])
            return u'[{} ago] {}: {}'.format(datetime.now()-prevMsgTimes[username], getDisplayName(cmd[1]), prevMsgs[username])
except IndexError:
return 'Usage: lastmsg <username>'
elif cmd[0] == 'update':
if user in adminIrcNames:
text = unicode.join(u' ', cmd[1:])
if text:
postUpdate(updaterId, text)
return 'Update posted to https://reddit.com/live/' + updaterId
else:
return 'Usage: update <text>'
else:
return 'Sorry, you do not have permission to use this command.'
elif cmd[0] == 'setfeed':
if user in adminIrcNames:
try:
if '/' in cmd[1]:
return 'Try again with just the part after the slash, not the whole URL.'
updaterId = cmd[1]
return u'Moved to https://reddit.com/live/{}.\nPlease use the "update" command to test.'.format(updaterId)
except IndexError:
return 'Usage: setfeed <updater id>'
else:
return 'Sorry, you do not have permission to use this command.'
elif cmd[0] == 'getfeed':
return u'Currently posting to https://reddit.com/live/{}.'.format(updaterId)
elif cmd[0] == 'help':
return 'TPPStreamerBot, by /u/flarn2006\n\
lastmsg <user> - Check the last thing said by a user\n\
getfeed - Get the URL of the updater currently being posted to\n\
setfeed <id> - Set the ID of the updater to post to (admin only)\n\
update <text> - Posts a message to the live updater (admin only)'
else:
return u'Unrecognized command "{}"'.format(cmd[0])
def send_whisper(user, msg):
global bot
if msg != '':
print u'\x1b[1;32m[W] {} <- \x1b[0;32m{}\x1b[m'.format(user, msg)
for m in msg.split('\n'):
bot.connection.privmsg('jtv', u'/w {} {}'.format(user, m)[:511])
class Ir |
pkilambi/python-jsonpath-rw | setup.py | Python | apache-2.0 | 1,220 | 0.012295 | import setuptools
import io
import sys
import os.path
import subprocess
setuptools.setup(
name='jsonpath-rw',
version='1.4.0',
description='A robust and significantly extended implementation of JSONPath for Python, with a clear AST for metaprogramming.',
author='Kenneth Knowles',
author_email='kenn.knowles@gmail.com',
url='https://github.com/kennknowles/python-jsonpath-rw',
license='Apache 2.0',
long_description=io.open('README.rst', encoding='utf-8').read(),
packages = ['jsonpath_rw', 'jsonpath_rw.bin'],
entry_points = {
'console_scripts': ['jsonpath.py = jsonpath_rw.bin.jsonpath:entry_point'],
},
test_suite = 'tests',
install_requires = [ 'ply', 'decorator', 'six' ],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python : | : 3',
'Programming L | anguage :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
|
sergeyf/scikit-learn | sklearn/ensemble/_voting.py | Python | bsd-3-clause | 19,214 | 0.000364 | """
Soft Voting/Majority Rule classifier and Voting regressor.
This module contains:
- A Soft Voting/Majority Rule classifier for classification estimators.
- A Voting regressor for regression estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>,
# Ramil Nugmanov <stsouko@live.ru>
# Mohamed Ali Jamaoui <m.ali.jamaoui@gmail.com>
#
# License: BSD 3 clause
from abc import abstractmethod
import numpy as np
from joblib import Parallel
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..base import TransformerMixin
from ..base import clone
from ._base import _fit_single_estimator
from ._base import _BaseHeterogeneousEnsemble
from ..preprocessing import LabelEncoder
from ..utils import Bunch
from ..utils.metaestimators import available_if
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..utils.validation import column_or_1d
from ..exceptions import NotFittedError
from ..utils._estimator_html_repr import _VisualBlock
from ..utils.fixes import delayed
class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):
"""Base class for voting.
Warning: This class should not be used directly. Use derived classes
instead.
"""
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return "(%d of %d) Processing %s" % (idx, total, name)
@property
def _weights_not_none(self):
"""Get the weights of not `None` estimators."""
if self.weights is None:
return None
return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"]
def _predict(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([est.predict(X) for est in self.estimators_]).T
@abstractmethod
def fit(self, X, y, sample_weight=None):
"""Get common fit operations."""
names, clfs = self._validate_estimators()
if self.weights is not None and len(self.weights) != len(self.estimators):
raise ValueError(
"Number of `estimators` and weights must be equal"
"; got %d weights, %d estimators"
% (len(self.weights), len(self.estimators))
)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(
clone(clf),
X,
y,
sample_weight=sample_weight,
message_clsname="Voting",
message=self._log_message(names[idx], idx + 1, len(clfs)),
)
for idx, clf in enumerate(clfs)
if clf != "drop"
) |
self.named_estimators_ = Bunch()
# Uses 'drop' as placeholder for dropped estimators
est_iter = iter(self.estimators_)
for name, est in self.estimators:
current_est = est if est == "drop" else next(est_iter)
| self.named_estimators_[name] = current_est
if hasattr(current_est, "feature_names_in_"):
self.feature_names_in_ = current_est.feature_names_in_
return self
def fit_transform(self, X, y=None, **fit_params):
"""Return class labels or probabilities for each estimator.
Return predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
Input samples.
y : ndarray of shape (n_samples,), default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
X_new : ndarray array of shape (n_samples, n_features_new)
Transformed array.
"""
return super().fit_transform(X, y, **fit_params)
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
# For consistency with other estimators we raise a AttributeError so
# that hasattr() fails if the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.estimators_[0].n_features_in_
def _sk_visual_block_(self):
names, estimators = zip(*self.estimators)
return _VisualBlock("parallel", estimators, names=names)
def _more_tags(self):
return {"preserves_dtype": []}
class VotingClassifier(ClassifierMixin, _BaseVoting):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
.. versionadded:: 0.17
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'``
using ``set_params``.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
flatten_transform : bool, default=True
Affects shape of transform output only when voting='soft'
If voting='soft' and flatten_transform=True, transform method returns
matrix with shape (n_samples, n_classifiers * n_classes). If
flatten_transform=False, it returns
(n_classifiers, n_samples, n_classes).
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
le_ : :class:`~sklearn.preprocessing.LabelEncoder`
Transformer used to encode the labels during fit and decode during
prediction.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying classifier exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingRegressor : Prediction voting regressor.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = |
Makeystreet/makeystreet | woot/apps/catalog/migrations/0029_auto__del_field_toindexstore_basemodel_ptr__add_field_toindexstore_id.py | Python | apache-2.0 | 26,077 | 0.007363 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ToIndexStore.basemodel_ptr'
db.delete_column(u'catalog_toindexstore', u'basemodel_ptr_id')
# Adding field 'ToIndexStore.id'
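        # Raw SQL is used here instead of the ORM helper (kept commented out
        # below for reference), presumably because South's add_column cannot
        # add a SERIAL primary key column to an existing table.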
db.execute('ALTER TABLE "catalog_toindexstore" ADD COLUMN "id" SERIAL NOT NULL PRIMARY KEY')
# db.add_column(u'catalog_toindexstore', u'id',
# self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
# keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'ToIndexStore.basemodel_ptr'
raise RuntimeError("Cannot reverse this migration. 'ToIndexStore.basemodel_ptr' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'ToIndexStore.basemodel_ptr'
db.add_column(u'catalog_toindexstore', u'basemodel_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.BaseModel'], unique=True, primary_key=True),
keep_default=False)
# Deleting field 'ToIndexStore.id'
db.delete_column(u'catalog_toindexstore', u'id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_orm | bases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', | [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'tutorial': ('django.db.models.field |
beanbaginc/django-evolution | django_evolution/compat/py23.py | Python | bsd-3-clause | 1,420 | 0 | """Compatibility functions for Python 2 and 3."""
from __future__ import unicode_literals
import io
from django_evolution.compat import six
from django_evolution.compat.picklers import DjangoCompatUnpickler
from django_evolution.compat.six.moves import cPickle as pickle
def pickle_dumps(obj):
"""Return a pickled representat | ion of an object.
This will always use Pickle protocol 0, which is the default on Python 2,
for compatibility across Python 2 and 3.
Args:
obj (object):
The object to dump.
Returns:
unicode:
The Unico | de pickled representation of the object, safe for storing
in the database.
"""
return pickle.dumps(obj, protocol=0).decode('latin1')
def pickle_loads(pickled_str):
"""Return the unpickled data from a pickle payload.
Args:
pickled_str (bytes):
The pickled data.
Returns:
object:
The unpickled data.
"""
if isinstance(pickled_str, six.text_type):
pickled_str = pickled_str.encode('latin1')
try:
return pickle.loads(pickled_str)
except AttributeError:
# We failed to load something from the pickled data. We have to try
# again with our own unpickler, which unfortunately won't benefit from
# cPickle, but it at least lets us remap things.
return DjangoCompatUnpickler(io.BytesIO(pickled_str)).load()
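# A minimal round trip using the helpers above (sketch; the payload value is
# illustrative):
#
# >>> payload = pickle_dumps({'app_label': 'evolutions'})
# >>> pickle_loads(payload)
# {'app_label': 'evolutions'}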
|
buhii/tomato | tests.py | Python | mit | 3,294 | 0.002732 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tomato.tests
"""
import unittest
import msgpack
import time
try:
from PIL import Image
except ImportError:
import Image
from tomato.swf_processor import Swf
from tomato.exceptions_tomato import MovieClipDoesNotExist
from tomato.utils import bits_list2string, Bits, SignedBits as SB, FixedPointBits as FB, MATRIX
def test_matrix(scale=None, rotate=None, translate=(0,0)):
m1 = MATRIX().generate(
scale=scale,
translate=translate,
rotate=rotate
)
m2 = MATRIX()
if scale:
m2.setattr_value('scale_x', scale[0])
m2.setattr_value('scale_y', scale[1])
if rotate:
m2.setattr_value('rotate_skew0', rotate[0])
m2.setattr_value('rotate_skew1', rotate[1])
if translate:
m2.setattr_value('tran | slate_x', translate[0])
m2.setattr_value('translate_y', translate[1])
m2.generate_bits()
return m1.value == m2.value
class TestSwfProcessor(unittest.TestCase):
def setUp(self):
self.swf_bitmap = Swf(open('sample/bitmap/bitmap.swf').read())
self.swf_tank = Swf(open('sample/mc/tank.swf').read())
def test_bits(self):
int_num = 31415
signed_num = - | 27182
float_num = 1.6180339
self.assertEqual(int_num, int(Bits(int_num)))
self.assertEqual(signed_num, int(SB(signed_num)))
self.assertAlmostEqual(float_num, float(FB(float_num)), 4)
def test_bits2string(self):
spam_string = "This is a spam!"
self.assertEqual(spam_string, bits_list2string([Bits(spam_string)]))
def test_matrixes(self):
self.assertEqual(True, test_matrix())
self.assertEqual(True, test_matrix(translate=(1250, 744)))
        self.assertEqual(True, test_matrix(scale=(2.4, 3.7)))
self.assertEqual(True, test_matrix(scale=(-55, -66), translate=(1250, 744)))
self.assertEqual(True, test_matrix(rotate=(-2.4, -3.8)))
self.assertEqual(True, test_matrix(rotate=(33, 66), translate=(1250, 744)))
        self.assertEqual(True, test_matrix(scale=(77, 44), rotate=(1.5, -3.7)))
self.assertEqual(True, test_matrix(translate=(1250, 744), rotate=(-1, -1), scale=(-3, -1)))
def test_fields_io_serialize_and_deserialize(self):
m1 = MATRIX().generate(
scale=(2.4, 3.7),
translate=(1500, 1500))
tpl = m1.serialize()
m2 = MATRIX().deserialize(tpl)
self.assertEqual(m1.value, m2.value)
def test_getting_movie_clip(self):
self.assertNotEqual(None, self.swf_tank.get_movie_clip('kombu'))
self.assertRaises(MovieClipDoesNotExist,
self.swf_bitmap.get_movie_clip, 'this_is_not_spam')
def test_delete_movie_clip(self):
self.swf_tank.delete_movie_clip('kombu')
self.swf_tank.write(open('sample/mc/tank_without_kombu.swf', 'w'))
def test_copy_swf(self):
c_tank = self.swf_tank.copy()
c_bitmap = self.swf_bitmap.copy()
self.assertEqual(c_tank.write(), self.swf_tank.write())
self.assertEqual(c_bitmap.write(), self.swf_bitmap.write())
c_tank.write(open('sample/mc/copy_tank.swf', 'w'))
c_bitmap.write(open('sample/mc/copy_bitmap.swf', 'w'))
if __name__ == '__main__':
unittest.main()
|
wubr2000/googleads-python-lib | examples/dfa/v1_20/create_spotlight_activity_group.py | Python | apache-2.0 | 2,085 | 0.004796 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in | writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WAR | RANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a new activity group for a given spotlight configuration.
To get spotlight tag configuration, run get_advertisers.py. To get activity
types, run get_activity_types.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
SPOTLIGHT_CONFIGURATION_ID = 'INSERT_SPOTLIGHT_CONFIGURATION_ID_HERE'
ACTIVITY_TYPE = 'INSERT_ACTIVITY_TYPE_HERE'
GROUP_NAME = 'INSERT_GROUP_NAME_HERE'
def main(client, spotlight_configuration_id, activity_type, group_name):
# Initialize appropriate service.
spotlight_service = client.GetService(
'spotlight', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Construct and save spotlight activity group.
spotlight_activity_group = {
'name': group_name,
'spotlightConfigurationId': spotlight_configuration_id,
'groupType': activity_type
}
result = spotlight_service.saveSpotlightActivityGroup(
spotlight_activity_group)
# Display results.
print 'Spotlight activity group with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client, SPOTLIGHT_CONFIGURATION_ID, ACTIVITY_TYPE, GROUP_NAME)
|
djoproject/pyshell | pyshell/utils/test/misc_test.py | Python | gpl-3.0 | 2,472 | 0 | #!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO use os.path.join in place of s | tring concatenation
import os
import shutil
import tempfile
import pytest
from pyshell.utils.exception import DefaultPyshellException
from | pyshell.utils.misc import createParentDirectory
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
class TestMisc(object):
def test_createParentDirectory1(self):
file_path = tempfile.gettempdir() + os.sep + "plop.txt"
assert os.path.exists(tempfile.gettempdir())
assert not os.path.exists(file_path)
createParentDirectory(file_path)
assert os.path.exists(tempfile.gettempdir())
assert not os.path.exists(file_path)
def test_createParentDirectory2(self):
shutil.rmtree(tempfile.gettempdir() + os.sep + "toto", True)
path = os.sep.join(("toto", "tata", "titi", "test.txt"))
path = tempfile.gettempdir() + os.sep + path
assert not os.path.exists(os.path.dirname(path))
assert not os.path.exists(path)
createParentDirectory(path)
assert os.path.exists(os.path.dirname(path))
assert not os.path.exists(path)
shutil.rmtree(tempfile.gettempdir() + os.sep + "toto", True)
def test_createParentDirectory3(self):
shutil.rmtree(tempfile.gettempdir() + os.sep + "toto", True)
os.makedirs(tempfile.gettempdir() + os.sep + "toto")
touch(tempfile.gettempdir() + os.sep + "toto" + os.sep + "plop")
path = (tempfile.gettempdir() + os.sep + "toto" + os.sep + "plop" +
os.sep + "test.txt")
with pytest.raises(DefaultPyshellException):
createParentDirectory(path)
shutil.rmtree(tempfile.gettempdir() + os.sep + "toto", True)
|
cevaris/pants | src/python/pants/backend/python/targets/python_requirement_library.py | Python | apache-2.0 | 1,293 | 0.006187 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.payload import Payload
from pants.base.payload_field import PythonRequirementsField
from pants.base.validation import assert_list
from pants.build_graph.target import Target
class PythonRequireme | ntLibrary(Target):
"""A set of pip requirements.
:API: public
"""
def __init__(self, payload=None, requirements=None, **kwargs):
"""
:param requirements: pip requirements as `python_requirement <#python_requirement>`_\s.
:type requirements: List of python_requirement calls
"""
payload = payload or Payload()
assert_list(requirements, expected_type=PythonRequirement, key_arg='requirements')
payload.add_fields({
'requirements': PythonRequirementsField | (requirements or []),
})
super(PythonRequirementLibrary, self).__init__(payload=payload, **kwargs)
self.add_labels('python')
@property
def requirements(self):
return self.payload.requirements
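# Hypothetical BUILD-file usage of the target above (names and versions are
# illustrative, not taken from a real Pants repo):
#
#   python_requirement_library(
#     name='requests',
#     requirements=[
#       python_requirement('requests>=2.5.0'),
#     ],
#   )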
|
gkc1000/pyscf | pyscf/scf/hf_symm.py | Python | apache-2.0 | 36,696 | 0.003706 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-re | lativistic restricted Hartree-Fock with point group symmetry.
The symmetry are not handled in a separate data structure. Note that during
the SCF iteration, the orbitals are grouped in terms of symmetry irreps.
But the orbitals in the result are sorted based on the orbital energies.
Func | tion symm.label_orb_symm can be used to detect the symmetry of the
molecular orbitals.
'''
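# For example (sketch, assuming a converged symmetry-adapted RHF calculation):
#
# from pyscf import gto, scf, symm
# mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True)
# mf = scf.RHF(mol).run()
# orbsym = symm.label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)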
import time
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf.scf import rohf
from pyscf.scf import chkfile
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True)
MO_BASE = getattr(__config__, 'MO_BASE', 1)
# mo_energy, mo_coeff, mo_occ are all in nosymm representation
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
    '''Analyze the given SCF object: print orbital energies and occupancies;
    print orbital coefficients; report the occupancy of each irrep; run a
    Mulliken population analysis.
'''
from pyscf.lo import orth
from pyscf.tools import dump_mat
mol = mf.mol
if not mol.symmetry:
return hf.analyze(mf, verbose, with_meta_lowdin, **kwargs)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
ovlp_ao = mf.get_ovlp()
log = logger.new_logger(mf, verbose)
if log.verbose >= logger.NOTE:
mf.dump_scf_summary(log)
nirrep = len(mol.irrep_id)
orbsym = get_orbsym(mf.mol, mo_coeff, ovlp_ao, False)
wfnsym = 0
noccs = [sum(orbsym[mo_occ>0]==ir) for ir in mol.irrep_id]
if mol.groupname in ('SO3', 'Dooh', 'Coov'):
log.note('TODO: total wave-function symmetry for %s', mol.groupname)
else:
log.note('Wave-function symmetry = %s',
symm.irrep_id2name(mol.groupname, wfnsym))
log.note('occupancy for each irrep: ' + (' %4s'*nirrep), *mol.irrep_name)
log.note(' ' + (' %4d'*nirrep), *noccs)
log.note('**** MO energy ****')
irname_full = {}
for k,ir in enumerate(mol.irrep_id):
irname_full[ir] = mol.irrep_name[k]
irorbcnt = {}
for k, j in enumerate(orbsym):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j],
mo_energy[k], mo_occ[k])
if log.verbose >= logger.DEBUG:
label = mol.ao_labels()
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsym):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' %
(k+MO_BASE, irname_full[j], irorbcnt[j]))
if with_meta_lowdin:
log.debug(' ** MO coefficients (expansion on meta-Lowdin AOs) **')
orth_coeff = orth.orth_ao(mol, 'meta_lowdin', s=ovlp_ao)
c = reduce(numpy.dot, (orth_coeff.conj().T, ovlp_ao, mo_coeff))
else:
log.debug(' ** MO coefficients (expansion on AOs) **')
c = mo_coeff
dump_mat.dump_rec(mf.stdout, c, label, molabel, start=MO_BASE, **kwargs)
dm = mf.make_rdm1(mo_coeff, mo_occ)
if with_meta_lowdin:
pop_and_charge = mf.mulliken_meta(mol, dm, s=ovlp_ao, verbose=log)
else:
pop_and_charge = mf.mulliken_pop(mol, dm, s=ovlp_ao, verbose=log)
dip = mf.dip_moment(mol, dm, verbose=log)
return pop_and_charge, dip
def get_irrep_nelec(mol, mo_coeff, mo_occ, s=None):
'''Electron numbers for each irreducible representation.
Args:
mol : an instance of :class:`Mole`
To provide irrep_id, and spin-adapted basis
mo_coeff : 2D ndarray
Regular orbital coefficients, without grouping for irreps
mo_occ : 1D ndarray
Regular occupancy, without grouping for irreps
Returns:
irrep_nelec : dict
The number of electrons for each irrep {'ir_name':int,...}.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, verbose=0)
>>> mf = scf.RHF(mol)
>>> mf.scf()
-76.016789472074251
>>> scf.hf_symm.get_irrep_nelec(mol, mf.mo_coeff, mf.mo_occ)
{'A1': 6, 'A2': 0, 'B1': 2, 'B2': 2}
'''
orbsym = get_orbsym(mol, mo_coeff, s, False)
irrep_nelec = dict([(mol.irrep_name[k], int(sum(mo_occ[orbsym==ir])))
for k, ir in enumerate(mol.irrep_id)])
return irrep_nelec
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
'''Canonicalization diagonalizes the Fock matrix in occupied, open,
virtual subspaces separatedly (without change occupancy).
'''
mol = mf.mol
if not mol.symmetry:
return hf.canonicalize(mf, mo_coeff, mo_occ, fock)
if fock is None:
dm = mf.make_rdm1(mo_coeff, mo_occ)
fock = mf.get_hcore() + mf.get_veff(mf.mol, dm)
coreidx = mo_occ == 2
viridx = mo_occ == 0
openidx = ~(coreidx | viridx)
mo = numpy.empty_like(mo_coeff)
mo_e = numpy.empty(mo_occ.size)
if getattr(mo_coeff, 'orbsym', None) is not None:
orbsym = mo_coeff.orbsym
irreps = set(orbsym)
for ir in irreps:
idx0 = orbsym == ir
for idx1 in (coreidx, openidx, viridx):
idx = idx0 & idx1
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
mo[:,idx] = numpy.dot(mo_coeff[:,idx], c)
mo_e[idx] = e
else:
s = mf.get_ovlp()
for idx in (coreidx, openidx, viridx):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
c = numpy.dot(mo_coeff[:,idx], c)
mo[:,idx] = _symmetrize_canonicalization_(mf, e, c, s)
mo_e[idx] = e
orbsym = get_orbsym(mol, mo, s, False)
mo = lib.tag_array(mo, orbsym=orbsym)
return mo_e, mo
def _symmetrize_canonicalization_(mf, mo_energy, mo_coeff, s):
'''Restore symmetry for canonicalized orbitals
'''
def search_for_degeneracy(mo_energy):
idx = numpy.where(abs(mo_energy[1:] - mo_energy[:-1]) < 1e-6)[0]
return numpy.unique(numpy.hstack((idx, idx+1)))
mol = mf.mol
degidx = search_for_degeneracy(mo_energy)
logger.debug1(mf, 'degidx %s', degidx)
if degidx.size > 0:
esub = mo_energy[degidx]
csub = mo_coeff[:,degidx]
scsub = numpy.dot(s, csub)
emin = abs(esub).min() * .5
es = []
cs = []
for i,ir in enumerate(mol.irrep_id):
so = mol.symm_orb[i]
sosc = numpy.dot(so.conj().T, scsub)
s_ir = reduce(numpy.dot, (so.conj().T, s, so))
fock_ir = numpy.dot(sosc*esub, sosc.conj().T)
mo_energy, u = mf._eigh(fock_ir, s_ir)
idx = abs(mo_energy) > emin
es.append(mo_energy[idx])
cs.append(numpy.dot(mol.symm_orb[i], u[:,idx]))
es = numpy.hstack(es).round(7)
|
rdo-management/heat | heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py | Python | apache-2.0 | 807 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
impor | t sqlalchemy
def upgrade(migrate_engine):
meta = sqlalch | emy.MetaData()
meta.bind = migrate_engine
event = sqlalchemy.Table('event', meta, autoload=True)
event.c.logical_resource_id.alter(name='resource_name')
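# A matching downgrade would be the mirror rename (sketch only; the original
# migration defines no downgrade()):
#
# def downgrade(migrate_engine):
#     meta = sqlalchemy.MetaData()
#     meta.bind = migrate_engine
#     event = sqlalchemy.Table('event', meta, autoload=True)
#     event.c.resource_name.alter(name='logical_resource_id')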
|
Saftophobia/shunting | data/__init__.py | Python | mit | 28 | 0.035714 | __au | thor__ = 'saftophob | ia'
|
sciosci/nsf_data_ingestion | nsf_data_ingestion/arxiv/fetch_data_hdfs_loop.py | Python | apache-2.0 | 2,651 | 0.000754 | from datetime import datetime
import urllib.request
import time
from subprocess import call
import sys
import os
retry_time = 45
# save_path = ""
def get_raw_data(path):
    start = time.perf_counter()
query = "http://export.arxiv.org/oai2?verb=ListRecords&metadataPrefix=oai_dc"
print("request: %s" % (query))
request = urllib.request.Request(query)
response = urllib.request.urlopen(request).read().decode('utf-8')
rawfile = open('papers_0.xml', 'w')
rawfile.write(response)
rawfile.close()
save_to_hdfs("papers_0.xml", path)
    end = time.perf_counter()
print("takes: %f s" % (end - start))
pos_start = response.rfind('<resumptionToken')
pos_end = response.rfind('</resumptionToken')
if pos_end > 0 and pos_end > pos_start:
pos = response.rfind('>', pos_start, pos_end)
resume_token = response[pos + 1:pos_end]
print("request_resume: %s" % (resume_token))
get_resume(resume_token, path)
def get_resume(token, path):
repeat = 0
time.sleep(retry_time)
    start = time.perf_counter()
filen = 'papers_%s.xml' % (token.replace('|', '_'))
rawfile = open(filen, 'w')
try:
query = "http://export.arxiv.org/oai2?verb=ListRecords&resumptionToken=%s" % (token)
request = urllib.request.Request(query)
response = urllib.request.urlopen(request).read().decode('utf-8')
rawfile.write(response)
rawfile.close()
save_to_hdfs(filen, path)
        end = time.perf_counter()
print("takes: %f s" % (end - start))
pos_start = response.rfind('<resumptionToken')
pos_end = response.rfind('</resumptionToken')
if pos_end > 0 and pos_end > pos_start and repeat < 20:
pos = response.rfind('>', pos_start, pos_end)
resume_token = response[po | s + 1:pos_end]
print("request_resume: %s" % (resume_token))
get_resume(resume_token, path)
| repeat += 1
except Exception as err:
print(err)
print("retry resume_token: %s" % (token))
time.sleep(30)
        get_resume(token, path)
def save_to_hdfs(filename, path):
print(">>>>>>>", path)
file = os.path.join(path, filename)
try:
out = call("hdfs dfs -test -e %s" % (file), shell=True)
if out == 0:
call("hdfs dfs -rm %s" % (file), shell=True)
print("file %s exists, delete old one" % (file))
call(['hdfs', 'dfs', '-put', filename, path])
print("send %s to hdfs" % (filename))
except Exception as e:
print("save %s to hdfs failed" % (filename))
if __name__ == '__main__':
get_raw_data(path=sys.argv[1])
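# Example invocation (the HDFS target path is illustrative):
# $ python fetch_data_hdfs_loop.py /user/nsf/arxiv/raw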
|
noplay/gns3-gui | gns3/modules/dynamips/pages/frame_relay_switch_configuration_page.py | Python | gpl-3.0 | 6,628 | 0.00166 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration page for Dynamips Frame Relay switches.
"""
from gns3.qt import QtCore, QtGui
from ..ui.frame_relay_switch_configuration_page_ui import Ui_frameRelaySwitchConfigPageWidget
class FrameRelaySwitchConfigurationPage(QtGui.QWidget, Ui_frameRelaySwitchConfigPageWidget):
"""
QWidget configuration page for Frame Relay switches.
"""
def __init__(self):
QtGui.QWidget.__init__(self)
self.setupUi(self)
self._mapping = {}
# connect slots
self.uiAddPushButton.clicked.connect(self._addMappingSlot)
self.uiDeletePushButton.clicked.connect(self._deleteMappingSlot)
self.uiMappingTreeWidget.itemActivated.connect(self._mappingSelectedSlot)
self.uiMappingTreeWidget.itemSelectionChanged.connect(self._mappingSelectionChangedSlot)
# enable sorting
self.uiMappingTreeWidget.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.uiMappingTreeWidget.setSortingEnabled(True)
def _mappingSelectedSlot(self, item, column):
"""
Loads a selected mapping from the tree widget.
:param item: selected QTreeWidgetItem instance
:param column: ignored
"""
(source_port, source_dlci) = item.text(0).split(':')
(destination_port, destination_dlci) = item.text(1).split(':')
self.uiSourcePortSpinBox.setValue(int(source_port))
self.uiSourceDLCISpinBox.setValue(int(source_dlci))
self.uiDestinationPortSpinBox.setValue(int(destination_port))
self.uiDestinationDLCISpinBox.setValue(int(destination_dlci))
def _mappingSelectionChangedSlot(self):
"""
Enables the use of the delete button.
"""
item = self.uiMappingTreeWidget.currentItem()
if item is not None:
self.uiDeletePushButton.setEnabled(True)
else:
self.uiDeletePushButton.setEnabled(False)
def _addMappingSlot(self):
"""
Adds a new mapping.
"""
source_port = self.uiSourcePortSpinBox.value()
source_dlci = self.uiSourceDLCISpinBox.value()
destination_port = self.uiDestinationPortSpinBox.value()
destination_dlci = self.uiDestinationDLCISpinBox.value()
if source_port == destination_port:
QtGui.QMessageBox.critical(self, self._node.name(), "Same source and destination ports")
return
source = "{port}:{dlci}".format(port=source_port, dlci=source_dlci)
destination = "{port}:{dlci}".format(port=destination_port, dlci=destination_dlci)
if source in self._mapping or destination in self._mapping:
QtGui.QMessageBox.critical(self, self._node.name(), "Mapping already defined")
return
item = QtGui.QTreeWidgetItem(self.uiMappingTreeWidget)
item.setText(0, source)
item.setText(1, destination)
self.uiMappingTreeWidget.addTopLevelItem(item)
self.uiSourcePortSpinBox.setValue(source_port + 1)
self.uiSourceDLCISpinBox.setValue(source_dlci + 1)
self.uiDestinationPortSpinBox.setValue(destination_port + 1)
self.uiDestinationDLCISpinBox.setValue(destination_dlci + 1)
self._mapping[source] = destination
def _deleteMappingSlot(self):
"""
Deletes a mapping.
"""
item = self.uiMappingTreeWidget.currentItem()
if item:
# connected_ports = self.node.getConnectedInterfaceList()
source = item.text(0)
source_port = int(source.split(':')[0])
destination = item.text(1)
destination_port = int(destination.split(':')[0])
# check that a link isn't connected to these ports
# before we delete that mapping
node_ports = self._node.ports()
for node_port in node_ports:
if (node_port.portNumber() == source_port or node_port.portNumber() == destination_port) and not node_port.isFree():
QtGui.QMessageBox.critical(self, self._node.name(), "A link is connected to port {}, please remove it first".format(node_port.name()))
return
del self._mapping[source]
self.uiMappingTreeWidget.takeTopLevelItem(self.uiMappingTreeWidget.indexOfTopLevelItem(item))
def loadSettings(self, settings, node, group=False):
"""
Loads the Frame-Relay switch settings.
:param settings: the settings (dictionary)
:param node: Node instance
:param group: indicates the settings apply to a group
"""
| if not group:
self.uiNameLineEdit.se | tText(settings["name"])
else:
self.uiNameLineEdit.setEnabled(False)
self.uiMappingTreeWidget.clear()
self._mapping = {}
self._node = node
for source, destination in settings["mappings"].items():
item = QtGui.QTreeWidgetItem(self.uiMappingTreeWidget)
item.setText(0, source)
item.setText(1, destination)
self.uiMappingTreeWidget.addTopLevelItem(item)
self._mapping[source] = destination
self.uiMappingTreeWidget.resizeColumnToContents(0)
self.uiMappingTreeWidget.resizeColumnToContents(1)
def saveSettings(self, settings, node, group=False):
"""
Saves the Frame-Relay switch settings.
:param settings: the settings (dictionary)
:param node: Node instance
:param group: indicates the settings apply to a group
"""
if not group:
# set the device name
name = self.uiNameLineEdit.text()
if not name:
QtGui.QMessageBox.critical(self, "Name", "Frame relay switch name cannot be empty!")
else:
settings["name"] = name
else:
del settings["name"]
settings["mappings"] = self._mapping.copy()
|