code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Create your views here.
from models import *
from django.contrib.auth.decorators import login_required, permission_required
from django.template import RequestContext, Template, Context
import json
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse, Http404
from django.contrib.auth.models import Permission, User
from django.conf import settings
from django.core.context_processors import csrf
from django.shortcuts import render_to_response, get_object_or_404, redirect
from datetime import datetime, timedelta, date
from django.db.models import Count, Sum, Max, Q
from django.contrib import messages
import plistlib
import ast
from forms import *
import pprint
import re
import os
from yapsy.PluginManager import PluginManager
from django.core.exceptions import PermissionDenied
import utils
import pytz
# import logging
# logging.basicConfig(level=logging.DEBUG)
@login_required
def index(request):
    """Front page dashboard.

    Bootstraps the very first user as a Global Admin (GA), gathers every
    machine the requesting user may see, runs each enabled plugin's front
    widget, and renders the index template.

    Fixes: removed an unreachable ``break`` after a ``return``, six unused
    time-window locals, an unused ``config_installed`` and a duplicated
    ``business_units`` assignment.
    """
    user = request.user
    # The first user (created by syncdb) has no profile; make them a GA.
    if User.objects.count() == 1:
        try:
            profile = UserProfile.objects.get(user=user)
        except UserProfile.DoesNotExist:
            profile = UserProfile(user=user)
        profile.level = 'GA'
        profile.save()
    user_level = user.userprofile.level
    if user_level != 'GA':
        # Non-GA users only see their own Business Units.
        if user.businessunit_set.count() == 0:
            c = {'user': request.user, }
            return render_to_response('server/no_access.html', c, context_instance=RequestContext(request))
        if user.businessunit_set.count() == 1:
            # Only one BU: go straight to its dashboard.
            bu = user.businessunit_set.all()[0]
            return redirect('server.views.bu_dashboard', bu_id=bu.id)
    if user_level == 'GA':
        machines = Machine.objects.all()
    else:
        machines = Machine.objects.none()
        for business_unit in user.businessunit_set.all():
            for group in business_unit.machinegroup_set.all():
                machines = machines | group.machine_set.all()
    # Load in the default plugins if needed, then build the plugin manager.
    utils.loadDefaultPlugins()
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    output = []
    # Run only the enabled plugins, in their configured order.
    enabled_plugins = Plugin.objects.all().order_by('order')
    for enabled_plugin in enabled_plugins:
        for plugin in manager.getAllPlugins():
            if plugin.name == enabled_plugin.name:
                data = {}
                data['name'] = plugin.name
                (data['html'], data['width']) = plugin.plugin_object.show_widget('front', machines)
                output.append(data)
                break
    output = utils.orderPluginOutput(output)
    # GAs see every BU; everyone else only the BUs they belong to.
    if user_level == 'GA':
        business_units = BusinessUnit.objects.all()
    else:
        business_units = user.businessunit_set.all()
    c = {'user': request.user, 'business_units': business_units, 'output': output, }
    return render_to_response('server/index.html', c, context_instance=RequestContext(request))
# Manage Users
@login_required
def manage_users(request):
    """List every user account; restricted to GA-level staff."""
    requester = request.user
    # Only Global Admins who are also Django staff may manage users.
    if requester.userprofile.level != 'GA' or not requester.is_staff:
        return redirect(index)
    context = {
        'user': requester,
        'users': User.objects.all(),
        'request': request,
    }
    return render_to_response('server/manage_users.html', context, context_instance=RequestContext(request))
# New User
@login_required
def new_user(request):
    """Create a new user account (GA-level staff only).

    Fix: the CSRF token gathered via ``csrf(request)`` used to be thrown
    away by a later ``c = {'form': form}`` reassignment; the token is now
    kept in the rendered context.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    # We require you to be staff to manage users.
    if not user.is_staff:
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = NewUserForm(request.POST)
        if form.is_valid():
            created = form.save()
            # Attach the requested privilege level to the new user's profile.
            user_profile = UserProfile.objects.get(user=created)
            user_profile.level = request.POST['user_level']
            user_profile.save()
            return redirect('manage_users')
    else:
        form = NewUserForm()
    c['form'] = form
    return render_to_response('forms/new_user.html', c, context_instance=RequestContext(request))
@login_required
def edit_user(request, user_id):
    """Edit an existing user's privilege level (GA-level staff only).

    Fixes: ``has_usable_password`` is a method and was referenced without
    calling it, so the bound method was always truthy and LDAP users never
    received ``EditLDAPUserForm``; the CSRF token dict was also discarded
    by a later context reassignment.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    # We require you to be staff to manage users.
    if not user.is_staff:
        return redirect(index)
    the_user = get_object_or_404(User, pk=int(user_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        # LDAP-backed accounts have no usable local password.
        if the_user.has_usable_password():
            form = EditUserForm(request.POST)
        else:
            form = EditLDAPUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            user_profile = UserProfile.objects.get(user=the_user)
            user_profile.level = request.POST['user_level']
            user_profile.save()
            # Non-GAs must not keep Django staff status.
            if user_profile.level != 'GA':
                user.is_staff = False
                user.save()
            return redirect('manage_users')
    else:
        if the_user.has_usable_password():
            form = EditUserForm({'user_level': the_user.userprofile.level, 'user_id': the_user.id})
        else:
            form = EditLDAPUserForm({'user_level': the_user.userprofile.level, 'user_id': the_user.id})
    c.update({'form': form, 'the_user': the_user})
    return render_to_response('forms/edit_user.html', c, context_instance=RequestContext(request))
@login_required
def user_add_staff(request, user_id):
    """Grant Django staff status to a user (GA only, never on yourself)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    target_id = int(user_id)
    if request.user.id == target_id:
        # You shouldn't have been able to get here anyway.
        return redirect('manage_users')
    target = get_object_or_404(User, pk=target_id)
    target.is_staff = True
    target.save()
    return redirect('manage_users')
@login_required
def user_remove_staff(request, user_id):
    """Revoke Django staff status from a user (GA only, never on yourself)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    target_id = int(user_id)
    if request.user.id == target_id:
        # You shouldn't have been able to get here anyway.
        return redirect('manage_users')
    target = get_object_or_404(User, pk=target_id)
    target.is_staff = False
    target.save()
    return redirect('manage_users')
@login_required
def delete_user(request, user_id):
    """Delete a user account (GA only, never yourself).

    Fix: this view was the only user-management view missing the
    ``@login_required`` decorator; an anonymous request would hit
    ``request.user.userprofile`` unauthenticated. It is now gated like
    its siblings.
    """
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    if request.user.id == int(user_id):
        # You shouldn't have been able to get here anyway.
        return redirect('manage_users')
    user = get_object_or_404(User, pk=int(user_id))
    user.delete()
    return redirect('manage_users')
# Plugin machine list
@login_required
def machine_list(request, pluginName, data, page='front', theID=None):
    """Drill-down machine list for a plugin widget.

    Collects the machines for the requested page scope ('front',
    'bu_dashboard' or 'group_dashboard'), hands them to the named plugin's
    ``filter_machines`` and renders the result.

    Fix: the bu/group branches carried comments promising an access check
    ("Need to make sure the user is allowed to see this") but performed
    none; non-members could list any BU's or group's machines. The checks
    now mirror ``bu_dashboard``/``group_dashboard``.
    """
    user = request.user
    title = None
    # Build the plugin manager and load all plugins from the usual places.
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    if page == 'front':
        # All machines the user may see.
        if user.userprofile.level == 'GA':
            machines = Machine.objects.all()
        else:
            machines = Machine.objects.none()
            for business_unit in user.businessunit_set.all():
                for group in business_unit.machinegroup_set.all():
                    machines = machines | group.machine_set.all()
    if page == 'bu_dashboard':
        # Only machines for that BU; GAs may see every BU.
        business_unit = get_object_or_404(BusinessUnit, pk=theID)
        if business_unit not in user.businessunit_set.all() and user.userprofile.level != 'GA':
            return redirect(index)
        machine_groups = MachineGroup.objects.filter(business_unit=business_unit).prefetch_related('machine_set').all()
        if machine_groups.count() != 0:
            machines_unsorted = machine_groups[0].machine_set.all()
            for machine_group in machine_groups[1:]:
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        else:
            machines_unsorted = None
        machines = machines_unsorted
    if page == 'group_dashboard':
        # Only machines from that group; check membership via its BU.
        machine_group = get_object_or_404(MachineGroup, pk=theID)
        if machine_group.business_unit not in user.businessunit_set.all() and user.userprofile.level != 'GA':
            return redirect(index)
        machines = Machine.objects.filter(machine_group=machine_group)
    # Hand the machines and the request data to the named plugin.
    for plugin in manager.getAllPlugins():
        if plugin.name == pluginName:
            (machines, title) = plugin.plugin_object.filter_machines(machines, data)
    c = {'user': user, 'machines': machines, 'req_type': page, 'title': title, 'bu_id': theID, 'request': request}
    return render_to_response('server/overview_list_all.html', c, context_instance=RequestContext(request))
# New BU
@login_required
def new_business_unit(request):
    """Create a new Business Unit (GA only).

    Fix: the GA check used to run only after the POST branch had already
    saved the new BU and returned, so any authenticated non-GA could create
    Business Units by POSTing directly. The check now runs first. The CSRF
    token dict is also no longer discarded by a context reassignment.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = BusinessUnitForm(request.POST)
        if form.is_valid():
            new_business_unit = form.save(commit=False)
            new_business_unit.save()
            form.save_m2m()
            return redirect('bu_dashboard', new_business_unit.id)
    else:
        form = BusinessUnitForm()
    c['form'] = form
    return render_to_response('forms/new_business_unit.html', c, context_instance=RequestContext(request))
# Edit BU
@login_required
def edit_business_unit(request, bu_id):
    """Edit a Business Unit (GA only).

    Fixes: removed an unreachable duplicate GA check after the render
    context was built, and kept the CSRF token dict instead of discarding
    it with a context reassignment.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    business_unit = get_object_or_404(BusinessUnit, pk=int(bu_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        # Staff get the form that can also assign users to the BU.
        if user.is_staff:
            form = EditUserBusinessUnitForm(request.POST, instance=business_unit)
        else:
            form = EditBusinessUnitForm(request.POST, instance=business_unit)
        if form.is_valid():
            new_business_unit = form.save(commit=False)
            new_business_unit.save()
            form.save_m2m()
            return redirect('bu_dashboard', new_business_unit.id)
    else:
        if user.is_staff:
            form = EditUserBusinessUnitForm(instance=business_unit)
        else:
            form = EditBusinessUnitForm(instance=business_unit)
    c.update({'form': form, 'business_unit': business_unit})
    return render_to_response('forms/edit_business_unit.html', c, context_instance=RequestContext(request))
@login_required
def delete_business_unit(request, bu_id):
    """Confirmation page before deleting a Business Unit (GA only).

    Fixes: removed a leftover debug ``print`` and the dead
    ``machines = []`` / commented-out accumulation loop.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    business_unit = get_object_or_404(BusinessUnit, pk=int(bu_id))
    config_installed = 'config' in settings.INSTALLED_APPS
    machine_groups = business_unit.machinegroup_set.all()
    # All machines that would be removed along with this BU.
    machines = Machine.objects.filter(machine_group__business_unit=business_unit)
    c = {'user': user, 'business_unit': business_unit, 'config_installed': config_installed, 'machine_groups': machine_groups, 'machines': machines}
    return render_to_response('server/business_unit_delete_confirm.html', c, context_instance=RequestContext(request))
@login_required
def really_delete_business_unit(request, bu_id):
    """Irreversibly delete a Business Unit (GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    business_unit = get_object_or_404(BusinessUnit, pk=int(bu_id))
    business_unit.delete()
    return redirect(index)
# BU Dashboard
@login_required
def bu_dashboard(request, bu_id):
    """Dashboard for one Business Unit: runs every enabled plugin's
    'bu_dashboard' widget over the BU's machines.

    Fixes: removed a debug ``print`` from the access-denied path and six
    unused time-window locals.
    """
    user = request.user
    user_level = user.userprofile.level
    business_unit = get_object_or_404(BusinessUnit, pk=bu_id)
    bu = business_unit
    config_installed = 'config' in settings.INSTALLED_APPS
    # Members of the BU (or GAs) only.
    if business_unit not in user.businessunit_set.all() and user_level != 'GA':
        return redirect(index)
    machine_groups = business_unit.machinegroup_set.all()
    is_editor = user_level in ('GA', 'RW')
    machines = utils.getBUmachines(bu_id)
    # Build the plugin manager and load all plugins.
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    output = []
    enabled_plugins = Plugin.objects.all().order_by('order')
    for enabled_plugin in enabled_plugins:
        for plugin in manager.getAllPlugins():
            if plugin.name == enabled_plugin.name:
                data = {}
                data['name'] = plugin.name
                (data['html'], data['width']) = plugin.plugin_object.show_widget('bu_dashboard', machines, bu.id)
                output.append(data)
                break
    output = utils.orderPluginOutput(output, 'bu_dashboard', bu.id)
    c = {'user': request.user, 'machine_groups': machine_groups, 'is_editor': is_editor, 'business_unit': business_unit, 'user_level': user_level, 'output': output, 'config_installed': config_installed}
    return render_to_response('server/bu_dashboard.html', c, context_instance=RequestContext(request))
# Overview list (all)
@login_required
def overview_list_all(request, req_type, data, bu_id=None):
    """Machine list filtered by ``req_type``/``data`` over every BU the
    user can see (or a single BU when ``bu_id`` is given).

    Fixes: the ``mem_ok``/``mem_warning``/``mem_alert`` branches were a
    copy-paste that assigned ``disk_space_alert`` instead of doing anything;
    they and the other dead ``disk_space_*`` assignments are gone, along
    with the unused ``mem_415_gb``/``disk_space`` locals and a debug
    ``print`` in the access-denied path.

    NOTE(review): an unrecognized ``req_type`` still leaves ``machines``
    unbound, exactly as in the original — callers are expected to pass a
    known type.
    """
    user = request.user
    user_level = user.userprofile.level
    operating_system = None
    activity = None
    inactivity = None
    now = datetime.now()
    hour_ago = now - timedelta(hours=1)
    today = date.today()
    week_ago = today - timedelta(days=7)
    month_ago = today - timedelta(days=30)
    three_months_ago = today - timedelta(days=90)
    # memory_kb thresholds, expressed in KB (4 GB = 4 * 1024 * 1024 KB).
    mem_4_gb = 4 * 1024 * 1024
    mem_775_gb = 7.75 * 1024 * 1024
    mem_8_gb = 8 * 1024 * 1024
    if req_type == 'operating_system':
        operating_system = data
    if req_type == 'activity':
        activity = data
    if req_type == 'inactivity':
        inactivity = data
    if req_type == 'pending_updates':
        pending_update = data
    if req_type == 'pending_apple_updates':
        pending_apple_update = data
    if bu_id is not None:
        business_units = get_object_or_404(BusinessUnit, pk=bu_id)
        machine_groups = MachineGroup.objects.filter(business_unit=business_units).prefetch_related('machine_set').all()
        machines_unsorted = machine_groups[0].machine_set.all()
        for machine_group in machine_groups[1:]:
            machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        all_machines = machines_unsorted
        # Check the user is allowed to see this BU (GAs see everything).
        if business_units not in user.businessunit_set.all():
            if user_level != 'GA':
                return redirect(index)
    else:
        # All BUs the user has access to.
        business_units = user.businessunit_set.all()
        machines_unsorted = Machine.objects.none()
        for business_unit in business_units:
            for machine_group in business_unit.machinegroup_set.all():
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        all_machines = machines_unsorted
        if user_level == 'GA':
            business_units = BusinessUnit.objects.all()
            all_machines = Machine.objects.all()
    if req_type == 'errors':
        machines = all_machines.filter(errors__gt=0)
    if req_type == 'warnings':
        machines = all_machines.filter(warnings__gt=0)
    if req_type == 'active':
        machines = all_machines.filter(activity__isnull=False)
    if req_type == 'disk_space_ok':
        machines = all_machines.filter(hd_percent__lt=80)
    if req_type == 'disk_space_warning':
        machines = all_machines.filter(hd_percent__range=["80", "89"])
    if req_type == 'disk_space_alert':
        machines = all_machines.filter(hd_percent__gte=90)
    if req_type == 'mem_ok':
        machines = all_machines.filter(memory_kb__gte=mem_8_gb)
    if req_type == 'mem_warning':
        machines = all_machines.filter(memory_kb__range=[mem_4_gb, mem_775_gb])
    if req_type == 'mem_alert':
        machines = all_machines.filter(memory_kb__lt=mem_4_gb)
    if req_type == 'uptime_ok':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__lte=1)
    if req_type == 'uptime_warning':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__range=[1, 7])
    if req_type == 'uptime_alert':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__gt=7)
    if activity is not None:
        if data == '1-hour':
            machines = all_machines.filter(last_checkin__gte=hour_ago)
        if data == 'today':
            machines = all_machines.filter(last_checkin__gte=today)
        if data == '1-week':
            machines = all_machines.filter(last_checkin__gte=week_ago)
    if inactivity is not None:
        if data == '1-month':
            machines = all_machines.filter(last_checkin__range=(three_months_ago, month_ago))
        if data == '3-months':
            machines = all_machines.exclude(last_checkin__gte=three_months_ago)
    if operating_system is not None:
        machines = all_machines.filter(operating_system__exact=operating_system)
    if req_type == 'pending_updates':
        machines = all_machines.filter(pendingupdate__update=pending_update)
    if req_type == 'pending_apple_updates':
        machines = all_machines.filter(pendingappleupdate__update=pending_apple_update)
    c = {'user': user, 'machines': machines, 'req_type': req_type, 'data': data, 'bu_id': bu_id}
    return render_to_response('server/overview_list_all.html', c, context_instance=RequestContext(request))
# Machine Group Dashboard
@login_required
def group_dashboard(request, group_id):
    """Dashboard for one machine group: runs every enabled plugin's
    'group_dashboard' widget over the group's machines."""
    user = request.user
    config_installed = 'config' in settings.INSTALLED_APPS
    user_level = user.userprofile.level
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    # The user must belong to the owning BU (GAs see everything).
    if business_unit not in user.businessunit_set.all():
        if user_level != 'GA':
            return redirect(index)
    is_editor = user_level in ('GA', 'RW')
    machines = machine_group.machine_set.all()
    # Discover plugins from the usual locations.
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    output = []
    for enabled_plugin in Plugin.objects.all().order_by('order'):
        for plugin in manager.getAllPlugins():
            if plugin.name != enabled_plugin.name:
                continue
            html, width = plugin.plugin_object.show_widget('group_dashboard', machines, machine_group.id)
            output.append({'name': plugin.name, 'html': html, 'width': width})
            break
    output = utils.orderPluginOutput(output, 'group_dashboard', machine_group.id)
    c = {'user': request.user, 'machine_group': machine_group, 'user_level': user_level, 'is_editor': is_editor, 'business_unit': business_unit, 'output': output, 'config_installed': config_installed, 'request': request}
    return render_to_response('server/group_dashboard.html', c, context_instance=RequestContext(request))
# New Group
@login_required
def new_machine_group(request, bu_id):
    """Create a machine group inside a Business Unit.

    Fix: the permission check used to run only after a POST had already
    saved the group and returned, so non-editors and non-members could
    create groups by POSTing directly. The check now runs first.
    """
    business_unit = get_object_or_404(BusinessUnit, pk=bu_id)
    user = request.user
    user_level = user.userprofile.level
    is_editor = user_level in ('GA', 'RW')
    # Must be an editor within this BU; GAs are always allowed.
    if business_unit not in user.businessunit_set.all() or not is_editor:
        if user_level != 'GA':
            return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = MachineGroupForm(request.POST)
        if form.is_valid():
            new_machine_group = form.save(commit=False)
            new_machine_group.business_unit = business_unit
            new_machine_group.save()
            return redirect('group_dashboard', new_machine_group.id)
    else:
        form = MachineGroupForm()
    c.update({'form': form, 'is_editor': is_editor, 'business_unit': business_unit})
    return render_to_response('forms/new_machine_group.html', c, context_instance=RequestContext(request))
# Edit Group
@login_required
def edit_machine_group(request, group_id):
    """Edit a machine group (editors within the owning BU, or GAs).

    Fix: on a valid POST the code called ``machine_group.save()`` instead
    of ``form.save()``; using the form's save is the correct ModelForm
    idiom and guarantees the validated data is what gets persisted.
    """
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    user = request.user
    user_level = user.userprofile.level
    is_editor = user_level in ('GA', 'RW')
    if business_unit not in user.businessunit_set.all() or not is_editor:
        if user_level != 'GA':
            return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = EditMachineGroupForm(request.POST, instance=machine_group)
        if form.is_valid():
            form.save()
            return redirect('group_dashboard', machine_group.id)
    else:
        form = EditMachineGroupForm(instance=machine_group)
    c.update({'form': form, 'is_editor': is_editor, 'business_unit': business_unit, 'machine_group': machine_group})
    return render_to_response('forms/edit_machine_group.html', c, context_instance=RequestContext(request))
# Delete Group
# New machine
@login_required
def new_machine(request, group_id):
    """Manually create a machine inside a machine group.

    Fix: the permission check used to run only after a POST had already
    saved the machine and returned, so non-editors and non-members could
    create machines by POSTing directly. The check now runs first.
    """
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    user = request.user
    user_level = user.userprofile.level
    is_editor = user_level in ('GA', 'RW')
    if business_unit not in user.businessunit_set.all() or not is_editor:
        if user_level != 'GA':
            return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = NewMachineForm(request.POST)
        if form.is_valid():
            new_machine = form.save(commit=False)
            new_machine.machine_group = machine_group
            new_machine.save()
            return redirect('machine_detail', new_machine.id)
    else:
        form = NewMachineForm()
    c.update({'form': form, 'is_editor': is_editor, 'machine_group': machine_group})
    return render_to_response('forms/new_machine.html', c, context_instance=RequestContext(request))
# Machine detail
@login_required
def machine_detail(request, machine_id):
    """Detail page for one machine: report, facts, conditions and the
    install/removal results from the most recent Munki run.

    Fixes: the bare ``except:`` around the IP-address lookup (which also
    swallowed KeyboardInterrupt/SystemExit) is narrowed to ``Exception``,
    and the removal-results regex is now a raw string.
    """
    machine = get_object_or_404(Machine, pk=machine_id)
    machine_group = machine.machine_group
    business_unit = machine_group.business_unit
    user = request.user
    user_level = user.userprofile.level
    # The user must be in a BU that's allowed to see this machine.
    if business_unit not in user.businessunit_set.all():
        if user_level != 'GA':
            return redirect(index)
    report = machine.get_report()
    if machine.facts.count() != 0:
        facts = machine.facts.all()
        if settings.EXCLUDED_FACTS:
            for excluded in settings.EXCLUDED_FACTS:
                facts = facts.exclude(fact_name=excluded)
    else:
        facts = None
    if machine.conditions.count() != 0:
        conditions = machine.conditions.all()
        # Pull the IP address(es) out of the Munki conditions, if reported.
        try:
            ip_address = conditions.get(machine=machine, condition_name__exact='ipv4_address')
            ip_address = ip_address.condition_data
        except Exception:
            # Missing or duplicated condition row; no address to show.
            ip_address = None
        if settings.EXCLUDED_CONDITIONS:
            for excluded in settings.EXCLUDED_CONDITIONS:
                conditions = conditions.exclude(condition_name=excluded)
    else:
        conditions = None
        ip_address = None
    # Map "name-version" -> installed/error from the most recent run.
    install_results = {}
    for result in report.get('InstallResults', []):
        nameAndVers = result['name'] + '-' + result['version']
        if result['status'] == 0:
            install_results[nameAndVers] = "installed"
        else:
            install_results[nameAndVers] = 'error'
    if install_results:
        for item in report.get('ItemsToInstall', []):
            name = item.get('display_name', item['name'])
            nameAndVers = ('%s-%s'
                           % (name, item['version_to_install']))
            item['install_result'] = install_results.get(
                nameAndVers, 'pending')
        for item in report.get('ManagedInstalls', []):
            if 'version_to_install' in item:
                name = item.get('display_name', item['name'])
                nameAndVers = ('%s-%s'
                               % (name, item['version_to_install']))
                if install_results.get(nameAndVers) == 'installed':
                    item['installed'] = True
    # Handle items that were removed during the most recent run.
    # This is crappy. We should fix it in Munki.
    removal_results = {}
    for result in report.get('RemovalResults', []):
        m = re.search(r'^Removal of (.+): (.+)$', result)
        if m:
            try:
                if m.group(2) == 'SUCCESSFUL':
                    removal_results[m.group(1)] = 'removed'
                else:
                    removal_results[m.group(1)] = m.group(2)
            except IndexError:
                pass
    if removal_results:
        for item in report.get('ItemsToRemove', []):
            name = item.get('display_name', item['name'])
            item['install_result'] = removal_results.get(
                name, 'pending')
            if item['install_result'] == 'removed':
                if 'RemovedItems' not in report:
                    report['RemovedItems'] = [item['name']]
                elif name not in report['RemovedItems']:
                    report['RemovedItems'].append(item['name'])
    config_installed = 'config' in settings.INSTALLED_APPS
    if 'managed_uninstalls_list' in report:
        report['managed_uninstalls_list'].sort()
        if config_installed:
            from config.views import filter_uninstalls
            report['managed_uninstalls_list'] = filter_uninstalls(business_unit.id, report['managed_uninstalls_list'])
    c = {'user': user, 'machine_group': machine_group, 'business_unit': business_unit, 'report': report, 'install_results': install_results, 'removal_results': removal_results, 'machine': machine, 'facts': facts, 'conditions': conditions, 'ip_address': ip_address, 'config_installed': config_installed}
    return render_to_response('server/machine_detail.html', c, context_instance=RequestContext(request))
# Edit Machine
# Delete Machine
@login_required
def delete_machine(request, machine_id):
    """Delete a machine; the user must belong to its BU (or be a GA)."""
    machine = get_object_or_404(Machine, pk=machine_id)
    owning_group = machine.machine_group
    owning_bu = owning_group.business_unit
    requester = request.user
    if owning_bu not in requester.businessunit_set.all():
        if requester.userprofile.level != 'GA':
            return redirect(index)
    machine.delete()
    return redirect('group_dashboard', owning_group.id)
@login_required
def settings_page(request):
    """Settings page (GA only).

    Fixes: the GA check used to run only after the historical-retention
    setting had been read/created, and a debug ``print`` leaked the
    setting on every request. Check first, no print.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    # Pull the historical_data setting, creating the 180-day default if absent.
    try:
        historical_setting = SalSetting.objects.get(name='historical_retention')
    except SalSetting.DoesNotExist:
        historical_setting = SalSetting(name='historical_retention', value='180')
        historical_setting.save()
    historical_setting_form = SettingsHistoricalDataForm(initial={'days': historical_setting.value})
    c = {'user': request.user, 'request': request, 'historical_setting_form': historical_setting_form}
    return render_to_response('server/settings.html', c, context_instance=RequestContext(request))
@login_required
def settings_historical_data(request):
    """Save the historical-data retention setting (GA only).

    Fix: a POST with an invalid form used to fall off the end of the
    function and return ``None`` (a 500); every path now returns a
    redirect to the settings page.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    if request.method == 'POST':
        form = SettingsHistoricalDataForm(request.POST)
        if form.is_valid():
            try:
                historical_setting = SalSetting.objects.get(name='historical_retention')
            except SalSetting.DoesNotExist:
                historical_setting = SalSetting(name='historical_retention')
            historical_setting.value = form.cleaned_data['days']
            historical_setting.save()
            messages.success(request, 'Data retention settings saved.')
        else:
            messages.error(request, 'Invalid data retention value.')
    return redirect('settings_page')
@login_required
def plugins_page(request):
    """List enabled and disabled plugins (GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    # Sync the Plugin table with what's on disk before listing.
    utils.reloadPluginsModel()
    context = {
        'user': request.user,
        'request': request,
        'enabled_plugins': Plugin.objects.all(),
        'disabled_plugins': utils.disabled_plugins(),
    }
    return render_to_response('server/plugins.html', context, context_instance=RequestContext(request))
@login_required
def plugin_plus(request, plugin_id):
    """Move a plugin one slot later in the display order (GA only)."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    current_plugin = get_object_or_404(Plugin, pk=plugin_id)
    # Swap order values with the plugin currently one slot below.
    neighbour = get_object_or_404(Plugin, order=int(current_plugin.order) + 1)
    current_plugin.order += 1
    current_plugin.save()
    neighbour.order -= 1
    neighbour.save()
    return redirect('plugins_page')
@login_required
def plugin_minus(request, plugin_id):
    """Move a plugin one slot earlier in the display order (GA only)."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    current_plugin = get_object_or_404(Plugin, pk=plugin_id)
    # Swap order values with the plugin currently one slot above.
    neighbour = get_object_or_404(Plugin, order=int(current_plugin.order) - 1)
    current_plugin.order -= 1
    current_plugin.save()
    neighbour.order += 1
    neighbour.save()
    return redirect('plugins_page')
@login_required
def plugin_disable(request, plugin_id):
    """Disable a plugin by deleting its Plugin row (GA only)."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    get_object_or_404(Plugin, pk=plugin_id).delete()
    return redirect('plugins_page')
@login_required
def plugin_enable(request, plugin_name):
    """Enable a plugin by name, creating its Plugin row if needed.

    Fix: unlike plugin_disable/plugin_plus/plugin_minus this view had no
    GA gate, so any authenticated user could enable plugins; it is now
    gated consistently with its siblings.
    """
    if request.user.userprofile.level != 'GA':
        return redirect('server.views.index')
    # Only create a row if one doesn't already exist for this name.
    try:
        Plugin.objects.get(name=plugin_name)
    except Plugin.DoesNotExist:
        plugin = Plugin(name=plugin_name, order=utils.UniquePluginOrder())
        plugin.save()
    return redirect('plugins_page')
@login_required
def api_keys(request):
    """List all API keys (GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    context = {
        'user': request.user,
        'api_keys': ApiKey.objects.all(),
        'request': request,
    }
    return render_to_response('server/api_keys.html', context, context_instance=RequestContext(request))
@login_required
def new_api_key(request):
    """Create a new API key (GA only).

    Fix: the GA check used to run only after the POST branch had already
    saved the key and redirected, so any authenticated user could mint
    API keys by POSTing directly. The check now runs first, and the CSRF
    token dict is no longer discarded by a context reassignment.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = ApiKeyForm(request.POST)
        if form.is_valid():
            new_api_key = form.save()
            return redirect('display_api_key', key_id=new_api_key.id)
    else:
        form = ApiKeyForm()
    c['form'] = form
    return render_to_response('forms/new_api_key.html', c, context_instance=RequestContext(request))
@login_required
def display_api_key(request, key_id):
    """Show an API key exactly once (GA only); seen keys redirect home."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    api_key = get_object_or_404(ApiKey, pk=int(key_id))
    # A key may only be displayed the first time; after that, bounce home.
    if api_key.has_been_seen:
        return redirect(index)
    api_key.has_been_seen = True
    api_key.save()
    context = {'user': request.user, 'api_key': api_key, 'request': request}
    return render_to_response('server/api_key_display.html', context, context_instance=RequestContext(request))
@login_required
def edit_api_key(request, key_id):
    """Edit an API key (GA only).

    Fixes: removed an unreachable duplicate GA check after the render
    context was built, and kept the CSRF token dict instead of discarding
    it with a context reassignment.
    """
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    api_key = get_object_or_404(ApiKey, pk=int(key_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = ApiKeyForm(request.POST, instance=api_key)
        if form.is_valid():
            api_key = form.save()
            return redirect(api_keys)
    else:
        form = ApiKeyForm(instance=api_key)
    c.update({'form': form, 'api_key': api_key})
    return render_to_response('forms/edit_api_key.html', c, context_instance=RequestContext(request))
@login_required
def delete_api_key(request, key_id):
    """Delete an API key (GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    get_object_or_404(ApiKey, pk=int(key_id)).delete()
    return redirect(api_keys)
# preflight
@csrf_exempt
def preflight(request):
    """Return the osquery queries contributed by all loaded plugins as JSON.

    BUG FIX: a bare ``except: break`` previously aborted the whole loop when
    a single plugin misbehaved (or didn't implement ``plugin_type``), silently
    dropping every remaining plugin's queries. A bad plugin is now skipped.
    The unused ``counter`` variable was removed.
    """
    # Build the manager and load plugins from the configured locations.
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    output = {}
    output['queries'] = {}
    for plugin in manager.getAllPlugins():
        try:
            # Only osquery plugins contribute queries.
            if plugin.plugin_object.plugin_type() != 'osquery':
                continue
            for query in plugin.plugin_object.get_queries():
                # The query name becomes the dict key; strip it from the body.
                name = query.pop('name')
                output['queries'][name] = query
        except Exception:
            # Skip just this plugin instead of aborting the whole scan.
            continue
    return HttpResponse(json.dumps(output))
# checkin
@csrf_exempt
def checkin(request):
if request.method != 'POST':
print 'not post data'
raise Http404
data = request.POST
key = data.get('key')
serial = data.get('serial')
serial = serial.upper()
# Take out some of the weird junk VMware puts in. Keep an eye out in case Apple actually uses these:
serial = serial.replace('/', '')
serial = serial.replace('+', '')
# Are we using Sal for some sort of inventory (like, I don't know, Puppet?)
try:
add_new_machines = settings.ADD_NEW_MACHINES
except:
add_new_machines = True
if add_new_machines == True:
# look for serial number - if it doesn't exist, create one
if serial:
try:
machine = Machine.objects.get(serial=serial)
except Machine.DoesNotExist:
machine = Machine(serial=serial)
else:
machine = get_object_or_404(Machine, serial=serial)
if key is None or key == 'None':
try:
key = settings.DEFAULT_MACHINE_GROUP_KEY
except Exception:
pass
machine_group = get_object_or_404(MachineGroup, key=key)
business_unit = machine_group.business_unit
try:
historical_setting = SalSetting.objects.get(name='historical_retention')
historical_days = historical_setting.value
except SalSetting.DoesNotExist:
historical_setting = SalSetting(name='historical_retention', value='180')
historical_setting.save()
historical_days = '180'
if machine:
machine.hostname = data.get('name', '<NO NAME>')
try:
use_enc = settings.USE_ENC
# If we're using Sal's Puppet ENC, don't change the machine group,
# as we're setting it in the GUI
except:
use_enc = False
if use_enc == False:
machine.machine_group = machine_group
machine.last_checkin = datetime.now()
if 'username' in data:
machine.username = data.get('username')
if 'base64bz2report' in data:
machine.update_report(data.get('base64bz2report'))
if 'sal_version' in data:
machine.sal_version = data.get('sal_version')
# extract machine data from the report
report_data = machine.get_report()
if 'Puppet_Version' in report_data:
machine.puppet_version = report_data['Puppet_Version']
if 'ManifestName' in report_data:
manifest = report_data['ManifestName']
machine.manifest = manifest
if 'MachineInfo' in report_data:
machine.operating_system = report_data['MachineInfo'].get(
'os_vers', 'UNKNOWN')
# some machines are reporting 10.9, some 10.9.0 - make them the same
if len(machine.operating_system) <= 4:
machine.operating_system = machine.operating_system + '.0'
machine.hd_space = report_data.get('AvailableDiskSpace') or 0
machine.hd_total = int(data.get('disk_size')) or 0
machine.hd_percent = int(round(((float(machine.hd_total)-float(machine.hd_space))/float(machine.hd_total))*100))
machine.munki_version = report_data.get('ManagedInstallVersion') or 0
hwinfo = {}
if 'SystemProfile' in report_data.get('MachineInfo', []):
for profile in report_data['MachineInfo']['SystemProfile']:
if profile['_dataType'] == 'SPHardwareDataType':
hwinfo = profile._items[0]
break
if 'Puppet' in report_data:
puppet = report_data.get('Puppet')
if 'time' in puppet:
machine.last_puppet_run = datetime.fromtimestamp(float(puppet['time']['last_run']))
if 'events' in puppet:
machine.puppet_errors = puppet['events']['failure']
if hwinfo:
machine.machine_model = hwinfo.get('machine_model')
machine.cpu_type = hwinfo.get('cpu_type')
machine.cpu_speed = hwinfo.get('current_processor_speed')
machine.memory = hwinfo.get('physical_memory')
if hwinfo.get('physical_memory')[-2:] == 'MB':
memory_mb = float(hwinfo.get('physical_memory')[:-3])
machine.memory_kb = int(memory_mb * 1024)
if hwinfo.get('physical_memory')[-2:] == 'GB':
memory_gb = float(hwinfo.get('physical_memory')[:-3])
machine.memory_kb = int(memory_gb * 1024 * 1024)
if hwinfo.get('physical_memory')[-2:] == 'TB':
memory_tb = float(hwinfo.get('physical_memory')[:-3])
machine.memory_kb = int(memory_tb * 1024 * 1024 * 1024)
if 'os_family' in report_data:
machine.os_family = report_data['os_family']
machine.save()
# Remove existing PendingUpdates for the machine
updates = machine.pending_updates.all()
updates.delete()
if 'ItemsToInstall' in report_data:
for update in report_data.get('ItemsToInstall'):
display_name = update.get('display_name', update['name'])
update_name = update.get('name')
version = str(update['version_to_install'])
pending_update = PendingUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
pending_update.save()
# Remove existing PendingAppleUpdates for the machine
updates = machine.pending_apple_updates.all()
updates.delete()
if 'AppleUpdates' in report_data:
for update in report_data.get('AppleUpdates'):
display_name = update.get('display_name', update['name'])
update_name = update.get('name')
version = str(update['version_to_install'])
pending_update = PendingAppleUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
pending_update.save()
# if Facter data is submitted, we need to first remove any existing facts for this machine
if 'Facter' in report_data:
facts = machine.facts.all()
facts.delete()
# Delete old historical facts
try:
datelimit = datetime.now() - timedelta(days=historical_days)
HistoricalFact.objects.filter(fact_recorded__lt=datelimit).delete()
except Exception:
pass
try:
historical_facts = settings.HISTORICAL_FACTS
except Exception:
historical_facts = []
pass
# now we need to loop over the submitted facts and save them
for fact_name, fact_data in report_data['Facter'].iteritems():
fact = Fact(machine=machine, fact_name=fact_name, fact_data=fact_data)
fact.save()
if fact_name in historical_facts:
fact = HistoricalFact(machine=machine, fact_name=fact_name, fact_data=fact_data, fact_recorded=datetime.now())
fact.save()
if 'Conditions' in report_data:
conditions = machine.conditions.all()
conditions.delete()
for condition_name, condition_data in report_data['Conditions'].iteritems():
# if it's a list (more than one result), we're going to conacetnate it into one comma separated string
if type(condition_data) == list:
result = None
for item in condition_data:
# is this the first loop? If so, no need for a comma
if result:
result = result + ', '+str(item)
else:
result = item
condition_data = result
#print condition_data
condition = Condition(machine=machine, condition_name=condition_name, condition_data=str(condition_data))
condition.save()
if 'osquery' in report_data:
try:
datelimit = (datetime.now() - timedelta(days=historical_days)).strftime("%s")
OSQueryResult.objects.filter(unix_time__lt=datelimit).delete()
except:
pass
for report in report_data['osquery']:
unix_time = int(report['unixTime'])
# Have we already processed this report?
try:
osqueryresult = OSQueryResult.objects.get(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
continue
except OSQueryResult.DoesNotExist:
osqueryresult = OSQueryResult(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
osqueryresult.save()
for items in report['diffResults']['added']:
for column, col_data in items.items():
osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='added', column_name=column, column_data=col_data)
osquerycolumn.save()
for item in report['diffResults']['removed']:
for column, col_data in items.items():
osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='removed', column_name=column, column_data=col_data)
osquerycolumn.save()
return HttpResponse("Sal report submmitted for %s"
% data.get('name'))
| macjustice/sal | server/views.py | Python | apache-2.0 | 47,897 |
import pytest
from hypothesis import assume, given
from hypothesis.strategies import integers, lists
from algorithms.structures.linkedlist import LinkedList
@given(lists(integers()))
def test_from_list(lst):
    """Round-trip: a non-empty python list survives conversion and back."""
    assume(lst)
    linked = LinkedList.from_pylist(lst)
    assert list(linked) == lst
def test_append():
    """Appending after the returned tail preserves head-to-tail order."""
    head = LinkedList(5)
    node = head
    for value in (4, 3, 2, 1):
        node = node.append(value)
    assert list(head) == [5, 4, 3, 2, 1]
def test_prepend():
    """Each prepend returns the new head of the list."""
    head = LinkedList(1)
    for value in (2, 3, 4, 5):
        head = head.prepend(value)
    assert list(head) == [5, 4, 3, 2, 1]
def test_delete():
    """Deleting head, middle and tail nodes behaves as expected."""
    data = [1, 2, 3]

    # Deleting the head yields the new head.
    first = LinkedList.from_pylist(data)
    assert list(first.delete()) == [2, 3]

    # Deleting a middle node returns the (unchanged) head.
    second = LinkedList.from_pylist(data)
    returned = second.next.delete()
    assert returned == second
    assert list(second) == [1, 3]

    # Deleting the tail truncates the list.
    third = LinkedList.from_pylist(data)
    third.next.next.delete()
    assert third.next.next is None
    assert list(third) == [1, 2]
def test_head():
    """Any node exposes the list's head via its .head attribute."""
    head = LinkedList.from_pylist([1, 2, 3, 4, 5])
    middle = head.next.next
    assert middle.head == head
def test_last():
    """.last returns the tail node of the list."""
    tail = LinkedList.from_pylist([1, 2, 3, 4, 5]).last
    assert tail.key == 5
def test_search():
    """search returns the matching node, or a falsy value when absent."""
    head = LinkedList.from_pylist([1, 2, 3, 4, 5])
    found = head.search(3)
    assert found.key == 3
    assert not head.search(10)
def reverse_list(head, limit=100):
    """Collect keys walking backwards from head.last via .prev links.

    Raises if more than `limit` nodes are visited, which would indicate a
    looped (corrupt) list.
    """
    keys = []
    node = head.last
    steps = 0
    while node:
        keys.append(node.key)
        node = node.prev
        steps += 1
        if steps > limit:
            raise Exception("Limit exceeded. List is most likely looped")
    return keys
def forward_list(head, limit=100):
    """Collect keys walking forward from head via .next links.

    Raises if more than `limit` nodes are visited, which would indicate a
    looped (corrupt) list.
    """
    keys = []
    node = head
    steps = 0
    while node:
        keys.append(node.key)
        node = node.next
        steps += 1
        if steps > limit:
            raise Exception("Limit exceeded. List is most likely looped")
    return keys
@pytest.mark.parametrize(
    "lst,a,b",
    [
        ([1, 2, 3, 4, 5, 6, 7], 2, 6),
        ([1, 2, 3], 1, 2),
        ([1, 2, 3], 2, 3),
        ([1, 2, 3, 4], 2, 3),
        ([1, 2, 3, 4], 3, 2),
        ([1, 2, 3], 2, 2),
        ([1, 2, 3], 1, 1),
        ([1, 2, 3], 3, 3),
        ([1], 1, 1),
    ],
)
def test_swap(lst, a, b):
    """Swapping two nodes matches swapping the values in a python list."""
    head = LinkedList.from_pylist(lst)
    node_a = head.search(a)
    node_b = head.search(b)
    node_a.swap(node_b)
    # The swap may have moved the head node; re-fetch it.
    head = head.head
    expected = lst[:]
    i, j = expected.index(a), expected.index(b)
    expected[i], expected[j] = expected[j], expected[i]
    assert forward_list(head) == expected
    assert reverse_list(head) == list(reversed(expected))
@pytest.mark.parametrize("lst", [[1, 2, 3, 4], [1, 2, 3], [1, 2], [1]])
def test_reverse(lst):
head = LinkedList.from_pylist(lst)
head = head.reverse()
assert forward_list(head) == list(reversed(lst))
assert reverse_list(head) == lst
| vadimadr/python-algorithms | tests/test_linkedlist.py | Python | mit | 2,900 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""INSPIRE search factory used in invenio-records-rest."""
from flask import current_app, request
from invenio_records_rest.errors import InvalidQueryRESTError
from inspirehep.modules.search import IQ
def inspire_search_factory(self, search):
    """Parse query using Invenio-Query-Parser.

    :param self: REST view.
    :param search: Elastic search DSL search instance.
    :returns: Tuple with search instance and URL arguments.
    """
    from invenio_records_rest.facets import default_facets_factory
    from invenio_records_rest.sorter import default_sorter_factory

    query_string = request.values.get('q', '')

    try:
        search = search.query(IQ(query_string, index=search._index))
    except SyntaxError:
        # Log the offending query before converting to a REST error.
        current_app.logger.debug(
            "Failed parsing query: {0}".format(query_string),
            exc_info=True)
        raise InvalidQueryRESTError()

    search_index = search._index[0]
    search, urlkwargs = default_facets_factory(search, search_index)
    search, sortkwargs = default_sorter_factory(search, search_index)
    for sort_key, sort_value in sortkwargs.items():
        urlkwargs.add(sort_key, sort_value)

    urlkwargs.add('q', query_string)
    return search, urlkwargs
| Panos512/inspire-next | inspirehep/modules/search/query.py | Python | gpl-2.0 | 2,216 |
# PISA (Prime Sample Attention) applied on top of the RetinaNet
# X-101-32x4d FPN 1x COCO baseline.
_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        # Swap the stock RetinaNet head for the PISA variant.
        type='PISARetinaHead',
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    # isr = importance-based sample reweighting; carl = classification-aware
    # regression loss. k/bias are the PISA hyper-parameters - presumably the
    # paper defaults; confirm against the PISA reference implementation.
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
| open-mmlab/mmdetection | configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py | Python | apache-2.0 | 272 |
def set_environmentals(obj, ed):
    """Copy selected sensor readings from the ``ed`` dict onto ``obj``.

    ``ed`` maps category names to lists of measurement dicts carrying
    'device', 'name' and 'value' keys. For each mapping below, the first
    measurement matching (device, key) is assigned to the named attribute;
    when no match is found the default is used, and when the category list
    is empty the attribute is not set at all.
    """
    def assign(attr_name, category, device, key, default=0):
        measurements = ed.get(category, [])
        if not measurements:
            return
        value = default
        for measurement in measurements:
            if measurement['device'] == device and measurement['name'] == key:
                value = measurement['value']
                break
        setattr(obj, attr_name, value)

    assign('lab_temperature', 'lab_temperatures', 'EnvironmentalMonitor', 'Lab Temp.')
    assign('lab_humidity', 'lab_humiditys', 'EnvironmentalMonitor', 'Lab Hum.')
    assign('lab_airpressure', 'lab_pneumatics', 'AirPressure', 'Pressure')
    assign('east_diffuser_temperature', 'lab_temperatures', 'RPiWeather', 'Lab Temp. 3')
    assign('east_return_temperature', 'lab_temperatures', 'RPiWeather', 'Lab Temp. 4')
    assign('outside_temperature', 'lab_temperatures', 'NOAA', 'Outside Temp')
| UManPychron/pychron | pychron/experiment/utilities/environmentals.py | Python | apache-2.0 | 771 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common file and os related utilities, including tempdir manipulation."""
from __future__ import print_function
import collections
import contextlib
import ctypes
import ctypes.util
import datetime
import errno
import glob
import hashlib
import os
import pwd
import re
import shutil
import stat
import subprocess
import tempfile
import six
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import retry_util
from chromite.utils import key_value_store
# Env vars consulted (in priority order) to locate the system tempdir.
# Minimally, this needs to match python's tempfile module and match normal
# unix standards.
_TEMPDIR_ENV_VARS = ('TMPDIR', 'TEMP', 'TMP')
def GetNonRootUser():
  """Returns a non-root user. Defaults to the current user.

  If the current user is root, returns the username of the person who
  ran the emerge command. If running using sudo, returns the username
  of the person who ran the sudo command. If no non-root user is
  found, returns None.
  """
  if os.getuid() == 0:
    # Running as root: fall back to the invoking user recorded by
    # portage or sudo, if any.
    username = os.environ.get('PORTAGE_USERNAME', os.environ.get('SUDO_USER'))
  else:
    username = pwd.getpwuid(os.getuid()).pw_name
  return None if username == 'root' else username
def IsChildProcess(pid, name=None):
  """Return True if pid is a child of the current process.

  Args:
    pid: Child pid to search for in current process's pstree.
    name: Name of the child process.

  Note:
    This function is not fool proof. If the process tree contains wierd names,
    an incorrect match might be possible.
  """
  output = cros_build_lib.run(
      ['pstree', '-Ap', str(os.getpid())], capture_output=True,
      print_cmd=False, encoding='utf-8').stdout
  # pstree renders entries as "name(pid)"; match the pid (and name if given).
  needle = '(%d)' % pid if name is None else '-%s(%d)' % (name, pid)
  return needle in output
def ExpandPath(path):
  """Return |path| with '~' expanded and symlinks/relative parts resolved."""
  expanded = os.path.expanduser(path)
  return os.path.realpath(expanded)
def IsSubPath(path, other):
  """Returns whether |path| is a sub path of |other|."""
  abs_path = os.path.abspath(path)
  abs_other = os.path.abspath(other)
  # A path counts as a subpath of itself; otherwise require a full
  # component match (so /ab is not a subpath of /a).
  return abs_path == abs_other or abs_path.startswith(abs_other + os.sep)
def AllocateFile(path, size, makedirs=False):
  """Create (or truncate) |path| so that it is exactly |size| bytes long.

  Args:
    path: Path to allocate the file.
    size: The length, in bytes, of the desired file.
    makedirs: If True, create missing leading directories in the path.
  """
  if makedirs:
    SafeMakedirs(os.path.dirname(path))

  # Opening with 'w' empties the file; truncate() then extends it to |size|
  # (as a sparse hole on filesystems that support it).
  with open(path, 'w') as out:
    out.truncate(size)
# All the modes that we allow people to pass to WriteFile. This allows us to
# make assumptions about the input so we can update it if needed. Grouped by
# the open() semantics each family shares.
_VALID_WRITE_MODES = {
    # Read & write, but no truncation, and file offset is 0.
    'r+', 'r+b',
    # Writing (and maybe reading) with truncation.
    'w', 'wb', 'w+', 'w+b',
    # Writing (and maybe reading), but no truncation, and file offset is at end.
    'a', 'ab', 'a+', 'a+b',
}
def WriteFile(path, content, mode='w', encoding=None, errors=None, atomic=False,
              makedirs=False, sudo=False):
  """Write the given content to disk.

  Args:
    path: Pathway to write the content to.
    content: Content to write. May be either an iterable, or a string.
    mode: The mode to use when opening the file. 'w' is for text files (see the
      following settings) and 'wb' is for binary files. If appending, pass
      'w+', etc...
    encoding: The encoding of the file content. Text files default to 'utf-8'.
    errors: How to handle encoding errors. Text files default to 'strict'.
    atomic: If the updating of the file should be done atomically. Note this
      option is incompatible w/ append mode.
    makedirs: If True, create missing leading directories in the path.
    sudo: If True, write the file as root.

  Raises:
    ValueError: On an unsupported mode or an invalid option combination.
  """
  if mode not in _VALID_WRITE_MODES:
    raise ValueError('mode must be one of {"%s"}, not %r' %
                     ('", "'.join(sorted(_VALID_WRITE_MODES)), mode))

  # Atomic append would require read-modify-write through sudo; unsupported.
  if sudo and atomic and ('a' in mode or '+' in mode):
    raise ValueError('append mode does not work in sudo+atomic mode')

  if 'b' in mode:
    if encoding is not None or errors is not None:
      raise ValueError('binary mode does not use encoding/errors')
  else:
    if encoding is None:
      encoding = 'utf-8'
    if errors is None:
      errors = 'strict'

  if makedirs:
    SafeMakedirs(os.path.dirname(path), sudo=sudo)

  # TODO(vapier): We can merge encoding/errors into the open call once we are
  # Python 3 only. Until then, we have to handle it ourselves.
  # Text content is encoded by hand and written through a binary handle so the
  # same code path works on both Python 2 and 3.
  if 'b' in mode:
    write_wrapper = lambda x: x
  else:
    mode += 'b'
    def write_wrapper(iterable):
      for item in iterable:
        yield item.encode(encoding, errors)

  # If the file needs to be written as root and we are not root, write to a temp
  # file, move it and change the permission.
  if sudo and os.getuid() != 0:
    if 'a' in mode or '+' in mode:
      # Use dd to run through sudo & append the output, and write the new data
      # to it through stdin.
      cros_build_lib.sudo_run(
          ['dd', 'conv=notrunc', 'oflag=append', 'status=none',
           'of=%s' % (path,)], print_cmd=False, input=content)

    else:
      with tempfile.NamedTemporaryFile(mode=mode, delete=False) as temp:
        write_path = temp.name
        temp.writelines(write_wrapper(
            cros_build_lib.iflatten_instance(content)))
      os.chmod(write_path, 0o644)

      try:
        # For atomic writes, move to a sibling temp name first so the final
        # rename happens within the destination filesystem.
        mv_target = path if not atomic else path + '.tmp'
        cros_build_lib.sudo_run(['mv', write_path, mv_target],
                                print_cmd=False, stderr=True)
        Chown(mv_target, user='root', group='root')
        if atomic:
          cros_build_lib.sudo_run(['mv', mv_target, path],
                                  print_cmd=False, stderr=True)

      except cros_build_lib.RunCommandError:
        # Clean up both staging files on failure before re-raising.
        SafeUnlink(write_path)
        SafeUnlink(mv_target)
        raise

  else:
    # We have the right permissions, simply write the file in python.
    write_path = path
    if atomic:
      write_path = path + '.tmp'
    with open(write_path, mode) as f:
      f.writelines(write_wrapper(cros_build_lib.iflatten_instance(content)))

    if not atomic:
      return

    try:
      # rename() within a filesystem is atomic on POSIX.
      os.rename(write_path, path)
    except EnvironmentError:
      SafeUnlink(write_path)
      raise
def Touch(path, makedirs=False, mode=None):
  """Simulate unix touch. Create if doesn't exist and update its timestamp.

  Args:
    path: a string, file name of the file to touch (creating if not present).
    makedirs: If True, create missing leading directories in the path.
    mode: The access permissions to set. In the style of chmod. Defaults to
      using the umask.
  """
  if makedirs:
    SafeMakedirs(os.path.dirname(path))

  # Append mode creates the file if missing without clobbering its contents.
  with open(path, 'a'):
    pass
  if mode is not None:
    os.chmod(path, mode)
  # utime(path, None) stamps the file with the current time.
  os.utime(path, None)
def Chown(path, user=None, group=None, recursive=False):
  """Simple sudo chown path to the user.

  Defaults to user running command. Does nothing if run as root user unless
  a new owner is provided.

  Args:
    path: str - File/directory to chown.
    user: str|int|None - User to chown the file to. Defaults to current user.
    group: str|int|None - Group to assign the file to.
    recursive: Also chown child files/directories recursively.
  """
  if user is None:
    owner = GetNonRootUser() or ''
  else:
    owner = str(user)
  group_name = '' if group is None else str(group)

  # With neither an owner nor a group there is nothing to change
  # (e.g. running as root with no explicit owner requested).
  if not owner and not group_name:
    return

  cmd = ['chown']
  if recursive:
    cmd.append('-R')
  cmd.append('%s:%s' % (owner, group_name))
  cmd.append(path)
  cros_build_lib.sudo_run(cmd, print_cmd=False, stderr=True, stdout=True)
def ReadFile(path, mode='r', encoding=None, errors=None):
  """Read |path| and return its contents (str for 'r', bytes for 'rb').

  The defaults are geared towards reading UTF-8 encoded text.

  Args:
    path: The file to read.
    mode: 'r' for text (decoded) or 'rb' for raw bytes.
    encoding: Text encoding; defaults to 'utf-8' in text mode.
    errors: Decode error handling; defaults to 'strict' in text mode.

  Returns:
    The content of the file, either as bytes or a string (with the specified
    encoding).
  """
  if mode not in ('r', 'rb'):
    raise ValueError('mode may only be "r" or "rb", not %r' % (mode,))

  binary = 'b' in mode
  if binary:
    if encoding is not None or errors is not None:
      raise ValueError('binary mode does not use encoding/errors')
  else:
    if encoding is None:
      encoding = 'utf-8'
    if errors is None:
      errors = 'strict'

  # Always read raw bytes; decode by hand in text mode. (Merging
  # encoding/errors into open() is possible once Python 3 only.)
  with open(path, 'rb') as f:
    data = f.read()
  if binary:
    return data
  return data.decode(encoding, errors)
def MD5HashFile(path):
  """Calculate the md5 hash of a given file path.

  Args:
    path: The path of the file to hash.

  Returns:
    The hex digest of the md5 hash of the file.
  """
  # Read raw bytes directly (equivalent to ReadFile(path, mode='rb')).
  with open(path, 'rb') as f:
    return hashlib.md5(f.read()).hexdigest()
def SafeSymlink(source, dest, sudo=False):
  """Create a symlink at |dest| pointing to |source|.

  An existing |dest| is replaced. This operation is not atomic.

  Args:
    source: source path.
    dest: destination path.
    sudo: If True, create the link as root.
  """
  if sudo and os.getuid() != 0:
    # -f replaces an existing entry; -T never treats dest as a directory.
    cros_build_lib.sudo_run(['ln', '-sfT', source, dest],
                            print_cmd=False, stderr=True)
    return
  SafeUnlink(dest)
  os.symlink(source, dest)
def SafeUnlink(path, sudo=False):
  """Unlink a file from disk, ignoring if it doesn't exist.

  Args:
    path: File to remove.
    sudo: If True, retry a failed unlink as root.

  Returns:
    True if the file existed and was removed, False if it didn't exist.
  """
  try:
    os.unlink(path)
  except EnvironmentError as e:
    if e.errno == errno.ENOENT:
      return False
    if not sudo:
      raise
    # Permission problem and sudo was requested: remove as root.
    cros_build_lib.sudo_run(['rm', '--', path], print_cmd=False, stderr=True)
  return True
def SafeMakedirs(path, mode=0o775, sudo=False, user='root'):
  """Make parent directories if needed. Ignore if existing.

  Args:
    path: The path to create. Intermediate directories will be created as
      needed.
    mode: The access permissions in the style of chmod.
    sudo: If True, create it via sudo, thus root owned.
    user: If |sudo| is True, run sudo as |user|.

  Returns:
    True if the directory had to be created, False if otherwise.

  Raises:
    EnvironmentError: If the makedir failed.
    RunCommandError: If using run and the command failed for any reason.
  """
  if sudo and not (os.getuid() == 0 and user == 'root'):
    if os.path.isdir(path):
      return False
    cros_build_lib.sudo_run(
        ['mkdir', '-p', '--mode', '%o' % mode, path], user=user,
        print_cmd=False, stderr=True, stdout=True)
    cros_build_lib.sudo_run(
        ['chmod', '%o' % mode, path],
        print_cmd=False, stderr=True, stdout=True)
    return True

  try:
    os.makedirs(path, mode)
    # Force the mode in case the umask filtered bits out during creation.
    os.chmod(path, mode)
    return True
  except EnvironmentError as e:
    if e.errno != errno.EEXIST or not os.path.isdir(path):
      raise

  # The directory already existed. If its mode drifted from the request,
  # try to sync it (best effort); callers coordinate modes if they care.
  if stat.S_IMODE(os.stat(path).st_mode) != mode:
    try:
      os.chmod(path, mode)
    except EnvironmentError:
      # Just make sure it's a directory.
      if not os.path.isdir(path):
        raise
  return False
class MakingDirsAsRoot(Exception):
  """Raised when directories would be created as root.

  SafeMakedirsNonRoot raises this instead of creating root-owned paths.
  """
def SafeMakedirsNonRoot(path, mode=0o775, user=None):
  """Create directories and make sure they are not owned by root.

  See SafeMakedirs for the arguments and returns.
  """
  if user is None:
    user = GetNonRootUser()

  if user is None or user == 'root':
    raise MakingDirsAsRoot('Refusing to create %s as user %s!' % (path, user))

  created = False
  should_chown = False
  try:
    created = SafeMakedirs(path, mode=mode, user=user)
    if not created:
      # The directory may pre-exist but be root-owned. As a HACK, take
      # ownership for the requested user.
      should_chown = (os.stat(path).st_uid == 0)
  except OSError as e:
    if e.errno == errno.EACCES:
      # A root-owned prefix of |path| can block us. As a HACK, create the
      # directory with sudo and then chown it back.
      created = should_chown = SafeMakedirs(path, mode=mode, sudo=True)

  if should_chown:
    Chown(path, user=user)
  return created
class BadPathsException(Exception):
  """Raised by various osutils path manipulation functions on bad input.

  E.g. a missing source/destination directory, or a non-empty target.
  """
def CopyDirContents(from_dir, to_dir, symlinks=False, allow_nonempty=False):
  """Copy the *contents* of |from_dir| into the existing |to_dir|.

  Unlike shutil.copytree, the destination directory must already exist and
  only the contents of |from_dir| (not the directory itself) are copied:

    CopyDirContents('from', 'to')
    to/
      inside/x.py
      y.py

  Args:
    from_dir: The directory whose contents should be copied. Must exist.
    to_dir: The directory to which contents should be copied. Must exist.
    symlinks: Whether symlinks should be copied or dereferenced. When True,
      all symlinks will be copied as symlinks into the destination. When
      False, the symlinks will be dereferenced and the contents copied over.
    allow_nonempty: If True, do not die when to_dir is nonempty.

  Raises:
    BadPathsException: if the source / target directories don't exist, or if
        target directory is non-empty when allow_nonempty=False.
    OSError: on esoteric permission errors.
  """
  if not os.path.isdir(from_dir):
    raise BadPathsException('Source directory %s does not exist.' % from_dir)
  if not os.path.isdir(to_dir):
    raise BadPathsException('Destination directory %s does not exist.' % to_dir)
  if os.listdir(to_dir) and not allow_nonempty:
    raise BadPathsException('Destination directory %s is not empty.' % to_dir)

  for entry in os.listdir(from_dir):
    src = os.path.join(from_dir, entry)
    dst = os.path.join(to_dir, entry)
    if symlinks and os.path.islink(src):
      os.symlink(os.readlink(src), dst)
    elif os.path.isdir(src):
      shutil.copytree(src, dst, symlinks=symlinks)
    elif os.path.isfile(src):
      shutil.copy2(src, dst)
def RmDir(path, ignore_missing=False, sudo=False):
  """Recursively remove a directory.

  Args:
    path: Path of directory to remove.
    ignore_missing: Do not error when path does not exist.
    sudo: Remove directories as root.
  """
  # `sudo` is a bit expensive, so always try a native delete first.
  try:
    shutil.rmtree(path)
    return
  except EnvironmentError as e:
    if ignore_missing and e.errno == errno.ENOENT:
      return
    if not sudo:
      raise

  # Native delete failed and sudo was requested: escalate to root.
  try:
    cros_build_lib.sudo_run(
        ['rm', '-r%s' % ('f' if ignore_missing else '',), '--', path],
        debug_level=logging.DEBUG, stdout=True, stderr=True)
  except cros_build_lib.RunCommandError:
    if not ignore_missing or os.path.exists(path):
      # Either the caller wants ENOENT to propagate, or the path still
      # exists (a real failure): re-raise.
      raise
class EmptyDirNonExistentException(BadPathsException):
  """EmptyDir was called on a non-existent directory without ignore_missing."""
def EmptyDir(path, ignore_missing=False, sudo=False, exclude=()):
  """Remove all files inside a directory, including subdirs.

  Args:
    path: Path of directory to empty.
    ignore_missing: Do not error when path does not exist.
    sudo: Remove directories as root.
    exclude: Iterable of file names to exclude from the cleanup. They should
      exactly match the file or directory name in path.
      e.g. ['foo', 'bar']

  Raises:
    EmptyDirNonExistentException: if ignore_missing false, and dir is missing.
    OSError: If the directory is not user writable.
  """
  path = ExpandPath(path)
  excluded = set(exclude)

  if not os.path.exists(path):
    if ignore_missing:
      return
    raise EmptyDirNonExistentException(
        'EmptyDir called non-existent: %s' % path)

  # We don't catch OSError if path is not a directory.
  for entry in os.listdir(path):
    if entry in excluded:
      continue
    full_path = os.path.join(path, entry)
    # Both options can throw OSError if there is a permission problem.
    if os.path.isdir(full_path):
      RmDir(full_path, ignore_missing=ignore_missing, sudo=sudo)
    else:
      SafeUnlink(full_path, sudo)
def Which(binary, path=None, mode=os.X_OK, root=None):
  """Return the absolute path to the specified binary.

  Args:
    binary: The binary to look for.
    path: Search path. Defaults to os.environ['PATH'].
    mode: File mode to check on the binary.
    root: Path to automatically prefix to every element of |path|.

  Returns:
    The full path to |binary| if found (with the right mode). Otherwise, None.
  """
  search_path = os.environ.get('PATH', '') if path is None else path
  for entry in search_path.split(os.pathsep):
    if root and entry.startswith('/'):
      # Don't prefix relative paths. We might want to support this at some
      # point, but it's not worth the coding hassle currently.
      entry = os.path.join(root, entry.lstrip('/'))
    candidate = os.path.join(entry, binary)
    if os.path.isfile(candidate) and os.access(candidate, mode):
      return candidate
  return None
def FindMissingBinaries(needed_tools):
  """Verifies that the required tools are present on the system.

  This is especially important for scripts that are intended to run
  outside the chroot.

  Args:
    needed_tools: an array of string specified binaries to look for.

  Returns:
    If all tools are found, returns the empty list. Otherwise, returns the
    list of missing tools.
  """
  missing = []
  for tool in needed_tools:
    if Which(tool) is None:
      missing.append(tool)
  return missing
def DirectoryIterator(base_path):
  """Iterates through the files and subdirs of a directory.

  Subdirectory entries are yielded with a trailing os.sep so they can be
  distinguished from plain files.
  """
  for root, dirs, files in os.walk(base_path):
    for directory in dirs:
      yield os.path.join(root, directory) + os.sep
    for filename in files:
      yield os.path.join(root, filename)
def IteratePaths(end_path):
  """Generator that iterates down to |end_path| from root /.

  Args:
    end_path: The destination. If this is a relative path, it will be resolved
      to absolute path. In all cases, it will be normalized.

  Yields:
    All the paths gradually constructed from / to |end_path|. For example:
    IteratePaths("/this/path") yields "/", "/this", and "/this/path".
  """
  # IteratePathParents walks upwards; reverse it to go root-first.
  upward_chain = list(IteratePathParents(end_path))
  return reversed(upward_chain)
def IteratePathParents(start_path):
    """Generator that iterates through a directory's parents.

    Args:
      start_path: The path to start from.

    Yields:
      The passed-in path, along with its parents. i.e.,
      IteratePathParents('/usr/local') would yield '/usr/local', '/usr', and '/'.
    """
    current = os.path.abspath(start_path)
    # os.path.abspath('//') can return '//' (POSIX permits a double leading
    # slash); renormalize it to '/' so the loop terminates correctly.
    if current == '//':
        current = '/'
    while True:
        yield current
        if not current.strip('/'):
            break
        current = os.path.dirname(current)
def FindInPathParents(path_to_find, start_path, test_func=None, end_path=None):
    """Look for a relative path, ascending through parent directories.

    Ascend through parent directories of current path looking for a relative
    path. I.e., given a directory structure like:
    -/
     |
     --usr
       |
       --bin
       |
       --local
         |
         --google

    the call FindInPathParents('bin', '/usr/local') would return '/usr/bin', and
    the call FindInPathParents('google', '/usr/local') would return
    '/usr/local/google'.

    Args:
      path_to_find: The relative path to look for.
      start_path: The path to start the search from. If |start_path| is a
        directory, it will be included in the directories that are searched.
      test_func: The function to use to verify the relative path. Defaults to
        os.path.exists. The function will be passed one argument - the target
        path to test. A True return value will cause FindInPathParents to
        return the target.
      end_path: The path to stop searching.
    """
    if end_path is not None:
        end_path = os.path.abspath(end_path)
    check = test_func if test_func is not None else os.path.exists
    for parent in IteratePathParents(start_path):
        # Stop (unsuccessfully) once we reach the configured boundary.
        if parent == end_path:
            break
        candidate = os.path.join(parent, path_to_find)
        if check(candidate):
            return candidate
    return None
def SetGlobalTempDir(tempdir_value, tempdir_env=None):
    """Set the global temp directory to the specified |tempdir_value|

    Args:
      tempdir_value: The new location for the global temp directory.
      tempdir_env: Optional. A list of key/value pairs to set in the
        environment. If not provided, set all global tempdir environment
        variables to point at |tempdir_value|.

    Returns:
      Returns (old_tempdir_value, old_tempdir_env).
      old_tempdir_value: The old value of the global temp directory.
      old_tempdir_env: A list of the key/value pairs that control the tempdir
        environment and were set prior to this function. If the environment
        variable was not set, it is recorded as None.
    """
    # pylint: disable=protected-access
    # Hold tempfile's internal lock so no other thread observes a
    # half-updated state (env vars updated but tempfile.tempdir not yet).
    with tempfile._once_lock:
        # Snapshot the previous state first so callers can restore it later.
        old_tempdir_value = GetGlobalTempDir()
        old_tempdir_env = tuple((x, os.environ.get(x)) for x in _TEMPDIR_ENV_VARS)
        # Now update TMPDIR/TEMP/TMP, and poke the python
        # internals to ensure all subprocess/raw tempfile
        # access goes into this location.
        if tempdir_env is None:
            os.environ.update((x, tempdir_value) for x in _TEMPDIR_ENV_VARS)
        else:
            # Restore mode: a recorded value of None means the variable was
            # originally unset, so remove it rather than set it to 'None'.
            for key, value in tempdir_env:
                if value is None:
                    os.environ.pop(key, None)
                else:
                    os.environ[key] = value
        # Finally, adjust python's cached value (we know it's cached by here
        # since we invoked _get_default_tempdir from above). Note this
        # is necessary since we want *all* output from that point
        # forward to go to this location.
        tempfile.tempdir = tempdir_value
    return (old_tempdir_value, old_tempdir_env)
def GetGlobalTempDir():
    """Get the path to the current global tempdir.

    The global tempdir path can be modified through calls to SetGlobalTempDir.
    """
    # pylint: disable=protected-access
    # _get_default_tempdir also caches its result inside the tempfile module,
    # which SetGlobalTempDir relies on when it later overwrites the cache.
    return tempfile._get_default_tempdir()
def _TempDirSetup(self, prefix='tmp', set_global=False, base_dir=None):
    """Generate a tempdir, modifying the object, and env to use it.

    Specifically, if set_global is True, then from this invocation forward,
    python and all subprocesses will use this location for their tempdir.

    The matching _TempDirTearDown restores the env to what it was.
    """
    # Stash the old tempdir that was used so we can
    # switch it back on the way out.
    self.tempdir = tempfile.mkdtemp(prefix=prefix, dir=base_dir)
    # Restrict access to the owner only; the dir may hold sensitive state.
    os.chmod(self.tempdir, 0o700)
    if set_global:
        # Record the previous global tempdir state on |self| so
        # _TempDirTearDown can restore it.
        self._orig_tempdir_value, self._orig_tempdir_env = \
            SetGlobalTempDir(self.tempdir)
def _TempDirTearDown(self, force_sudo, delete=True):
    """Remove the tempdir created by _TempDirSetup and restore global state.

    Args:
      self: The object _TempDirSetup previously decorated with |tempdir| (and
        possibly |_orig_tempdir_value| / |_orig_tempdir_env|) attributes.
      force_sudo: Remove the directory via sudo.
      delete: Whether to actually delete the directory.
    """
    # Note that _TempDirSetup may have failed, resulting in these attributes
    # not being set; this is why we use getattr here (and must).
    tempdir = getattr(self, 'tempdir', None)
    try:
        if tempdir is not None and delete:
            RmDir(tempdir, ignore_missing=True, sudo=force_sudo)
    except EnvironmentError as e:
        # Suppress ENOENT since we may be invoked
        # in a context where parallel wipes of the tempdir
        # may be occuring; primarily during hard shutdowns.
        if e.errno != errno.ENOENT:
            raise
    # Restore environment modification if necessary.
    orig_tempdir_value = getattr(self, '_orig_tempdir_value', None)
    if orig_tempdir_value is not None:
        # pylint: disable=protected-access
        SetGlobalTempDir(orig_tempdir_value, self._orig_tempdir_env)
class TempDir(object):
    """Object that creates a temporary directory.

    This object can either be used as a context manager or just as a simple
    object. The temporary directory is stored as self.tempdir in the object, and
    is returned as a string by a 'with' statement.
    """

    def __init__(self, **kwargs):
        """Constructor. Creates the temporary directory.

        Args:
          prefix: See tempfile.mkdtemp documentation.
          base_dir: The directory to place the temporary directory.
          set_global: Set this directory as the global temporary directory.
          delete: Whether the temporary dir should be deleted as part of cleanup.
            (default: True)
          sudo_rm: Whether the temporary dir will need root privileges to remove.
            (default: False)
        """
        # Keep an untouched copy for debugging/introspection.
        self.kwargs = kwargs.copy()
        # 'delete' and 'sudo_rm' are consumed here; everything else is
        # forwarded to _TempDirSetup.
        self.delete = kwargs.pop('delete', True)
        self.sudo_rm = kwargs.pop('sudo_rm', False)
        self.tempdir = None
        _TempDirSetup(self, **kwargs)

    def SetSudoRm(self, enable=True):
        """Sets |sudo_rm|, which forces us to delete temporary files as root."""
        self.sudo_rm = enable

    def Cleanup(self):
        """Clean up the temporary directory."""
        if self.tempdir is not None:
            try:
                _TempDirTearDown(self, self.sudo_rm, delete=self.delete)
            finally:
                # Clear even on failure so Cleanup/__del__ never retry forever.
                self.tempdir = None

    def __enter__(self):
        """Return the temporary directory."""
        return self.tempdir

    def __exit__(self, exc_type, exc_value, exc_traceback):
        try:
            self.Cleanup()
        except Exception:
            if exc_type:
                # If an exception from inside the context was already in progress,
                # log our cleanup exception, then allow the original to resume.
                logging.error('While exiting %s:', self, exc_info=True)
                if self.tempdir:
                    # Log all files in tempdir at the time of the failure.
                    try:
                        logging.error('Directory contents were:')
                        for name in os.listdir(self.tempdir):
                            logging.error('  %s', name)
                    except OSError:
                        logging.error('  Directory did not exist.')
                # Log all mounts at the time of the failure, since that's the most
                # common cause.
                mount_results = cros_build_lib.run(
                    ['mount'], stdout=True, stderr=subprocess.STDOUT,
                    check=False)
                logging.error('Mounts were:')
                logging.error('  %s', mount_results.output)
            else:
                # If there was not an exception from the context, raise ours.
                raise

    def __del__(self):
        # Best-effort cleanup if the object is garbage collected without an
        # explicit Cleanup()/context exit.
        self.Cleanup()

    def __str__(self):
        return self.tempdir if self.tempdir else ''
def TempDirDecorator(func):
    """Populates self.tempdir with path to a temporary writeable directory.

    The directory (and its contents) is removed when the decorated function
    returns.

    Args:
      func: A method taking |self| as its first argument.

    Returns:
      The wrapped method.
    """
    # Local import keeps this change self-contained (the surrounding module
    # may not import functools at the top level).
    import functools

    # functools.wraps copies __name__/__doc__/__module__ like the previous
    # manual assignments did, and additionally preserves __qualname__,
    # __dict__ and sets __wrapped__ for introspection.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        with TempDir() as tempdir:
            self.tempdir = tempdir
            return func(self, *args, **kwargs)
    return wrapper
def TempFileDecorator(func):
    """Populates self.tempfile with path to a temporary writeable file

    Builds on TempDirDecorator: the file lives inside self.tempdir and is
    cleaned up with it.

    Args:
      func: A method taking |self| as its first argument.

    Returns:
      The wrapped method (also wrapped by TempDirDecorator).
    """
    import functools

    # functools.wraps replaces the previous manual __name__/__doc__/__module__
    # copies and also preserves __qualname__ and __wrapped__.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # Create (and close) a named file inside self.tempdir; the decorated
        # function accesses it through self.tempfile.
        with tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False) as tmp:
            self.tempfile = tmp.name
        return func(self, *args, **kwargs)
    return TempDirDecorator(wrapper)
# Flags synced from sys/mount.h. See mount(2) for details.
# Basic access / execution restrictions.
MS_RDONLY = 1
MS_NOSUID = 2
MS_NODEV = 4
MS_NOEXEC = 8
MS_SYNCHRONOUS = 16
MS_REMOUNT = 32
MS_MANDLOCK = 64
MS_DIRSYNC = 128
# Access-time update behavior.
MS_NOATIME = 1024
MS_NODIRATIME = 2048
# Bind/move mounts and recursive application.
MS_BIND = 4096
MS_MOVE = 8192
MS_REC = 16384
MS_SILENT = 32768
MS_POSIXACL = 1 << 16
# Mount propagation types (see "shared subtrees" in mount(2)).
MS_UNBINDABLE = 1 << 17
MS_PRIVATE = 1 << 18
MS_SLAVE = 1 << 19
MS_SHARED = 1 << 20
MS_RELATIME = 1 << 21
MS_KERNMOUNT = 1 << 22
MS_I_VERSION = 1 << 23
MS_STRICTATIME = 1 << 24
# Kernel-internal flags.
MS_ACTIVE = 1 << 30
MS_NOUSER = 1 << 31
def Mount(source, target, fstype, flags, data=''):
    """Call the mount(2) func; see the man page for details."""
    # use_errno=True makes ctypes.get_errno() return the libc errno after
    # the call, which we need to build a meaningful OSError.
    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    # These fields might be a string or 0 (for NULL). Convert to bytes.
    def _MaybeEncode(s):
        return s.encode('utf-8') if isinstance(s, six.string_types) else s
    if libc.mount(_MaybeEncode(source), _MaybeEncode(target),
                  _MaybeEncode(fstype), ctypes.c_int(flags),
                  _MaybeEncode(data)) != 0:
        # Surface the failure as a regular OSError with errno semantics.
        e = ctypes.get_errno()
        raise OSError(e, os.strerror(e))
def MountDir(src_path, dst_path, fs_type=None, sudo=True, makedirs=True,
             mount_opts=('nodev', 'noexec', 'nosuid'), skip_mtab=False,
             **kwargs):
    """Mount |src_path| at |dst_path|

    Args:
      src_path: Source of the new mount.
      dst_path: Where to mount things.
      fs_type: Specify the filesystem type to use. Defaults to autodetect.
      sudo: Run through sudo.
      makedirs: Create |dst_path| if it doesn't exist.
      mount_opts: List of options to pass to `mount`.
      skip_mtab: Whether to write new entries to /etc/mtab.
      kwargs: Pass all other args to run.
    """
    runcmd = cros_build_lib.sudo_run if sudo else cros_build_lib.run
    if makedirs:
        SafeMakedirs(dst_path, sudo=sudo)
    cmd = ['mount', src_path, dst_path]
    if skip_mtab:
        cmd.append('-n')
    if fs_type:
        cmd.extend(['-t', fs_type])
    if mount_opts:
        cmd.extend(['-o', ','.join(mount_opts)])
    runcmd(cmd, **kwargs)
def MountTmpfsDir(path, name='osutils.tmpfs', size='5G',
                  mount_opts=('nodev', 'noexec', 'nosuid'), **kwargs):
    """Mount a tmpfs at |path|

    Args:
      path: Directory to mount the tmpfs.
      name: Friendly name to include in mount output.
      size: Size of the temp fs.
      mount_opts: List of options to pass to `mount`.
      kwargs: Pass all other args to MountDir.
    """
    opts = list(mount_opts)
    opts.append('size=%s' % size)
    MountDir(name, path, fs_type='tmpfs', mount_opts=opts, **kwargs)
def UmountDir(path, lazy=True, sudo=True, cleanup=True):
    """Unmount a previously mounted temp fs mount.

    Args:
      path: Directory to unmount.
      lazy: Whether to do a lazy unmount.
      sudo: Run through sudo.
      cleanup: Whether to delete the |path| after unmounting.
        Note: Does not work when |lazy| is set.
    """
    if sudo:
        runcmd = cros_build_lib.sudo_run
    else:
        runcmd = cros_build_lib.run
    # -d releases any associated loop device once the fs is unmounted.
    cmd = ['umount', '-d', path]
    if lazy:
        cmd += ['-l']
    runcmd(cmd, debug_level=logging.DEBUG)
    if cleanup:
        # We will randomly get EBUSY here even when the umount worked. Suspect
        # this is due to the host distro doing stupid crap on us like autoscanning
        # directories when they get mounted.
        def _retry(e):
            # When we're using `rm` (which is required for sudo), we can't cleanly
            # detect the aforementioned failure. This is because `rm` will see the
            # errno, handle itself, and then do exit(1). Which means all we see is
            # that rm failed. Assume it's this issue as -rf will ignore most things.
            if isinstance(e, cros_build_lib.RunCommandError):
                return True
            elif isinstance(e, OSError):
                # When we aren't using sudo, we do the unlink ourselves, so the exact
                # errno is bubbled up to us and we can detect it specifically without
                # potentially ignoring all other possible failures.
                return e.errno == errno.EBUSY
            else:
                # Something else, we don't know so do not retry.
                return False
        # Retry the delete for up to ~60 attempts with a 1 second sleep between
        # tries, but only for the EBUSY-style failures matched by _retry.
        retry_util.GenericRetry(_retry, 60, RmDir, path, sudo=sudo, sleep=1)
def UmountTree(path):
    """Unmounts |path| and any submounts under it."""
    # Scrape it from /proc/mounts since it's easily accessible;
    # additionally, unmount in reverse order of what's listed there
    # rather than trying a reverse sorting; it's possible for
    #   mount /z /foon
    #   mount /foon/blah -o loop /a
    # which reverse sorting cannot handle.
    prefix = os.path.realpath(path).rstrip('/') + '/'
    mounts = []
    for mtab in IterateMountPoints():
        dest = mtab.destination
        if dest.startswith(prefix) or dest == prefix.rstrip('/'):
            mounts.append(dest)
    # Popping from the end walks the list in reverse creation order.
    while mounts:
        UmountDir(mounts.pop(), lazy=False, cleanup=False)
def SetEnvironment(env):
    """Restore the environment variables to that of passed in dictionary."""
    # Drop every current variable, then install the requested set.
    for name in list(os.environ):
        del os.environ[name]
    for name, value in env.items():
        os.environ[name] = value
def SourceEnvironment(script, whitelist, ifs=',', env=None, multiline=False):
    """Returns the environment exported by a shell script.

    Note that the script is actually executed (sourced), so do not use this on
    files that have side effects (such as modify the file system). Stdout will
    be sent to /dev/null, so just echoing is OK.

    Args:
      script: The shell script to 'source'.
      whitelist: An iterable of environment variables to retrieve values for.
      ifs: When showing arrays, what separator to use.
      env: A dict of the initial env to pass down.  You can also pass it None
        (to clear the env) or True (to preserve the current env).
      multiline: Allow a variable to span multiple lines.

    Returns:
      A dictionary containing the values of the whitelisted environment
      variables that are set.
    """
    dump_script = ['source "%s" >/dev/null' % script,
                   'IFS="%s"' % ifs]
    for var in whitelist:
        # Note: If we want to get more exact results out of bash, we should switch
        # to using `declare -p "${var}"`.  It would require writing a custom parser
        # here, but it would be more robust.
        dump_script.append(
            '[[ "${%(var)s+set}" == "set" ]] && echo "%(var)s=\\"${%(var)s[*]}\\""'
            % {'var': var})
    dump_script.append('exit 0')
    # Translate the documented env values into what run() expects: {} clears
    # the environment, None inherits the caller's environment.
    if env is None:
        env = {}
    elif env is True:
        env = None
    output = cros_build_lib.run(['bash'], env=env, capture_output=True,
                                print_cmd=False, encoding='utf-8',
                                input='\n'.join(dump_script)).output
    # The dump prints KEY="VALUE" lines; parse them into a dict.
    return key_value_store.LoadData(output, multiline=multiline)
def ListBlockDevices(device_path=None, in_bytes=False):
    """Lists all block devices.

    Args:
      device_path: device path (e.g. /dev/sdc).
      in_bytes: whether to display size in bytes.

    Returns:
      A list of BlockDevice items with attributes 'NAME', 'RM', 'TYPE',
      'SIZE' (RM stands for removable).
    """
    columns = ['NAME', 'RM', 'TYPE', 'SIZE']
    BlockDevice = collections.namedtuple('BlockDevice', columns)

    cmd = ['lsblk', '--pairs']
    if in_bytes:
        cmd.append('--bytes')
    if device_path:
        cmd.append(device_path)
    cmd += ['--output', ','.join(columns)]

    result = cros_build_lib.dbg_run(cmd, capture_output=True, encoding='utf-8')
    # Each output line looks like: NAME="sda" RM="0" TYPE="disk" SIZE="..."
    pair_re = re.compile(r'(\S+?)=\"(.+?)\"')
    devices = []
    for line in result.stdout.strip().splitlines():
        attrs = dict(pair_re.findall(line))
        devices.append(BlockDevice(**attrs))
    return devices
def GetDeviceInfo(device, keyword='model'):
    """Get information of |device| by searching through device path.

    Looks for the file named |keyword| in the path upwards from
    /sys/block/|device|/device. This path is a symlink and will be fully
    expanded when searching.

    Args:
      device: Device name (e.g. 'sdc').
      keyword: The filename to look for (e.g. product, model).

    Returns:
      The content of the |keyword| file, or (implicitly) None when no such
      file exists anywhere on the walked-up path -- callers should be
      prepared for a None result.

    Raises:
      ValueError: If /sys/block/|device| is not a directory.
    """
    device_path = os.path.join('/sys', 'block', device)
    if not os.path.isdir(device_path):
        raise ValueError('%s is not a valid device path.' % device_path)
    # Walk from the fully-resolved device dir back up toward /sys, checking
    # each level for the |keyword| file.  Stops before reaching the root.
    path_list = ExpandPath(os.path.join(device_path, 'device')).split(os.path.sep)
    while len(path_list) > 2:
        target = os.path.join(os.path.sep.join(path_list), keyword)
        if os.path.isfile(target):
            return ReadFile(target).strip()
        path_list = path_list[:-1]
def GetDeviceSize(device_path, in_bytes=False):
    """Returns the size of |device|.

    Args:
      device_path: Device path (e.g. '/dev/sdc').
      in_bytes: If set True, returns the size in bytes.

    Returns:
      Size of the device in human readable format unless |in_bytes| is set.

    Raises:
      ValueError: If no 'disk' entry is reported for |device_path|.
    """
    for device in ListBlockDevices(device_path=device_path, in_bytes=in_bytes):
        if device.TYPE == 'disk':
            return int(device.SIZE) if in_bytes else device.SIZE
    raise ValueError('No size info of %s is found.' % device_path)
# Lightweight record describing one filesystem entry.
FileInfo = collections.namedtuple(
    'FileInfo', ['path', 'owner', 'size', 'atime', 'mtime'])


def StatFilesInDirectory(path, recursive=False, to_string=False):
    """Stat files in the directory |path|.

    Args:
      path: Path to the target directory.
      recursive: Whether to recurisvely list all files in |path|.
      to_string: Whether to return a string containing the metadata of the
        files.

    Returns:
      If |to_string| is False, returns a list of FileInfo objects. Otherwise,
      returns a string of metadata of the files.
    """
    path = ExpandPath(path)

    def _ToInfo(entry_path, stat_val):
        # lstat results -> FileInfo; owner is resolved to a user name.
        return FileInfo(entry_path,
                        pwd.getpwuid(stat_val.st_uid)[0],
                        stat_val.st_size,
                        datetime.datetime.fromtimestamp(stat_val.st_atime),
                        datetime.datetime.fromtimestamp(stat_val.st_mtime))

    file_infos = []
    for root, dirs, files in os.walk(path, topdown=True):
        for entry in dirs + files:
            full_path = os.path.join(root, entry)
            file_infos.append(_ToInfo(full_path, os.lstat(full_path)))
        if not recursive:
            # Process only the top-most directory.
            break

    if not to_string:
        return file_infos

    line_fmt = ('Path: {x.path}, Owner: {x.owner}, Size: {x.size} bytes, '
                'Accessed: {x.atime}, Modified: {x.mtime}')
    body = '\n'.join([line_fmt.format(x=x) for x in file_infos])
    return '%s\n%s' % ('Listing the content of %s' % path, body)
@contextlib.contextmanager
def ChdirContext(target_dir):
    """A context manager to chdir() into |target_dir| and back out on exit.

    Args:
      target_dir: A target directory to chdir into.
    """
    original_dir = os.getcwd()
    try:
        os.chdir(target_dir)
        yield
    finally:
        # Always return to where we started, even on error.
        os.chdir(original_dir)
def _SameFileSystem(path1, path2):
"""Determine whether two paths are on the same filesystem.
Be resilient to nonsense paths. Return False instead of blowing up.
"""
try:
return os.stat(path1).st_dev == os.stat(path2).st_dev
except OSError:
return False
class MountOverlayContext(object):
    """A context manager for mounting an OverlayFS directory.

    An overlay filesystem will be mounted at |mount_dir|, and will be unmounted
    when the context exits.
    """

    # `mount` exit codes that indicate the overlay fs type is unsupported and
    # we should fall back to the legacy 'overlayfs' API.
    OVERLAY_FS_MOUNT_ERRORS = (32,)

    def __init__(self, lower_dir, upper_dir, mount_dir, cleanup=False):
        """Initialize.

        Args:
          lower_dir: The lower directory (read-only).
          upper_dir: The upper directory (read-write).
          mount_dir: The mount point for the merged overlay.
          cleanup: Whether to remove the mount point after unmounting. This uses an
            internal retry logic for cases where unmount is successful but the
            directory still appears busy, and is generally more resilient than
            removing it independently.
        """
        self._lower_dir = lower_dir
        self._upper_dir = upper_dir
        self._mount_dir = mount_dir
        self._cleanup = cleanup
        # Set by _TempDirSetup when the modern API needs a workdir.
        self.tempdir = None

    def __enter__(self):
        # Upstream Kernel 3.18 and the ubuntu backport of overlayfs have different
        # APIs. We must support both.
        try_legacy = False
        stashed_e_overlay_str = None

        # We must ensure that upperdir and workdir are on the same filesystem.
        if _SameFileSystem(self._upper_dir, GetGlobalTempDir()):
            _TempDirSetup(self)
        elif _SameFileSystem(self._upper_dir, os.path.dirname(self._upper_dir)):
            _TempDirSetup(self, base_dir=os.path.dirname(self._upper_dir))
        else:
            logging.debug('Could create find a workdir on the same filesystem as %s. '
                          'Trying legacy API instead.',
                          self._upper_dir)
            try_legacy = True

        if not try_legacy:
            # Modern API: overlay with lowerdir/upperdir/workdir options.
            try:
                MountDir('overlay', self._mount_dir, fs_type='overlay', makedirs=False,
                         mount_opts=('lowerdir=%s' % self._lower_dir,
                                     'upperdir=%s' % self._upper_dir,
                                     'workdir=%s' % self.tempdir),
                         quiet=True)
            except cros_build_lib.RunCommandError as e_overlay:
                if e_overlay.result.returncode not in self.OVERLAY_FS_MOUNT_ERRORS:
                    raise
                logging.debug('Failed to mount overlay filesystem. Trying legacy API.')
                # Keep the first failure around so both errors can be reported
                # if the legacy attempt fails too.
                stashed_e_overlay_str = str(e_overlay)
                try_legacy = True

        if try_legacy:
            # Legacy (pre-3.18 / ubuntu backport) API: no workdir option.
            try:
                MountDir('overlayfs', self._mount_dir, fs_type='overlayfs',
                         makedirs=False,
                         mount_opts=('lowerdir=%s' % self._lower_dir,
                                     'upperdir=%s' % self._upper_dir),
                         quiet=True)
            except cros_build_lib.RunCommandError as e_overlayfs:
                logging.error('All attempts at mounting overlay filesystem failed.')
                if stashed_e_overlay_str is not None:
                    logging.error('overlay: %s', stashed_e_overlay_str)
                logging.error('overlayfs: %s', str(e_overlayfs))
                raise

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        UmountDir(self._mount_dir, cleanup=self._cleanup)
        # Remove the workdir created in __enter__ (sudo: overlay workdirs can
        # contain root-owned entries).
        _TempDirTearDown(self, force_sudo=True)
# One parsed line of /proc/mounts.
MountInfo = collections.namedtuple(
    'MountInfo',
    'source destination filesystem options')


def IterateMountPoints(proc_file='/proc/mounts'):
    """Iterate over all mounts as reported by "/proc/mounts".

    Args:
      proc_file: A path to a file whose content is similar to /proc/mounts.
        Default to "/proc/mounts" itself.

    Returns:
      A generator that yields MountInfo objects.
    """
    def _Unescape(field):
        # The kernel escapes whitespace etc. as \ooo (octal); undo that.
        return re.sub(r'\\([0-7]{3})', lambda m: chr(int(m.group(1), 8)), field)

    with open(proc_file) as fp:
        for line in fp:
            # Each line has exactly six whitespace-separated fields; the last
            # two (dump/pass) are ignored.
            src, dest, fs, opts, _, _ = [_Unescape(x) for x in line.split()]
            yield MountInfo(src, dest, fs, opts)
def IsMounted(path):
    """Determine if |path| is already mounted or not."""
    target = os.path.realpath(path).rstrip('/')
    return any(m.destination == target for m in IterateMountPoints())
def ResolveSymlinkInRoot(file_name, root):
    """Resolve a symlink |file_name| relative to |root|.

    This can be used to resolve absolute symlinks within an alternative root
    path (i.e. chroot). For example:

      ROOT-A/absolute_symlink --> /an/abs/path
      ROOT-A/relative_symlink --> a/relative/path

    absolute_symlink will be resolved to ROOT-A/an/abs/path
    relative_symlink will be resolved to ROOT-A/a/relative/path

    Args:
      file_name (str): A path to the file.
      root (str|None): A path to the root directory.

    Returns:
      |file_name| if |file_name| is not a symlink. Otherwise, the ultimate path
      that |file_name| points to, with links resolved relative to |root|.
    """
    depth = 0
    while os.path.islink(file_name):
        depth += 1
        # Guard against symlink loops.
        if depth > 128:
            raise ValueError('Too many link levels for %s.' % file_name)
        target = os.readlink(file_name)
        if not target.startswith('/'):
            # Relative links resolve against the link's own directory.
            file_name = os.path.join(os.path.dirname(file_name), target)
        elif root:
            # Absolute links are re-anchored under |root|.
            file_name = os.path.join(root, target[1:])
        else:
            file_name = target
    return file_name
def ResolveSymlink(file_name):
    """Resolve a symlink |file_name| to an absolute path.

    This is similar to ResolveSymlinkInRoot, but does not resolve absolute
    symlinks to an alternative root, and normalizes the path before returning.

    Args:
      file_name (str): The symlink.

    Returns:
      str - |file_name| if |file_name| is not a symlink. Otherwise, the ultimate
      path that |file_name| points to.
    """
    # root=None keeps absolute link targets as-is; realpath then normalizes
    # and absolutizes the final result.
    return os.path.realpath(ResolveSymlinkInRoot(file_name, None))
def IsInsideVm():
    """Return True if we are running inside a virtual machine.

    The detection is based on the model of the hard drive.
    """
    for model_path in glob.glob('/sys/block/*/device/model'):
        if not os.path.isfile(model_path):
            continue
        contents = ReadFile(model_path)
        # VirtualBox and VMware expose recognizable disk model strings.
        if contents.startswith(('VBOX', 'VMware')):
            return True
    return False
| endlessm/chromium-browser | third_party/chromite/lib/osutils.py | Python | bsd-3-clause | 45,242 |
from genshi.builder import tag
from trac.core import implements,Component
from trac.ticket.api import ITicketActionController
from trac.perm import IPermissionRequestor
revision = "$Rev: 6326 $"
url = "$URL: https://svn.edgewall.org/repos/trac/trunk/sample-plugins/workflow/DeleteTicket.py $"
class DeleteTicketActionController(Component):
    """Provides the admin with a way to delete a ticket.

    Illustrates how to create an action controller with side-effects.

    Don't forget to add `DeleteTicketActionController` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,DeleteTicketActionController
    }}}
    """

    implements(ITicketActionController, IPermissionRequestor)

    # IPermissionRequestor methods

    def get_permission_actions(self):
        # Declare the single permission this plugin introduces.
        return ['TICKET_DELETE']

    # ITicketActionController methods

    def get_ticket_actions(self, req, ticket):
        # Offer the 'delete' action (weight 0) only to users holding
        # TICKET_DELETE on this specific ticket resource.
        actions = []
        if 'TICKET_DELETE' in req.perm(ticket.resource):
            actions.append((0,'delete'))
        return actions

    def get_all_status(self):
        # Deleting never introduces a new ticket status.
        return []

    def render_ticket_action_control(self, req, ticket, action):
        # (label, control widget, hint) tuple shown in the ticket UI.
        return ("delete ticket", '', "This ticket will be deleted.")

    def get_ticket_changes(self, req, ticket, action):
        # No field changes -- the deletion happens as a side effect.
        return {}

    def apply_action_side_effects(self, req, ticket, action):
        # Be paranoid here, as this should only be called when
        # action is delete...
        if action == 'delete':
            ticket.delete()
| trac-ja/trac-ja | sample-plugins/workflow/DeleteTicket.py | Python | bsd-3-clause | 1,633 |
"""
vtkImageImportFromArray: a NumPy front-end to vtkImageImport
Load a Numeric Python array into a VTK image.
To use this class, you must have the LLNL Numeric Python distribution
(http://numpy.sf.net)
Methods:
GetOutput() -- connect to VTK image pipeline
SetArray() -- set the array to load in
Convert python 'Int' to VTK_UNSIGNED_SHORT:
(python doesn't support unsigned short, so this might be necessary)
SetConvertIntToUnsignedShort(yesno)
ConvertIntToUnsignedShortOn()
ConvertIntToUnsignedShortOff()
Methods from vtkImageImport:
(if you don't set these, sensible defaults will be used)
SetDataExtent()
SetDataSpacing()
SetDataOrigin()
"""
import Numeric
from vtk import vtkImageImport
from vtkConstants import *
# Detect whether the installed Numeric is "new" (> 20.0).  New versions
# support the cheap .flat view used below; older ones need .tostring().
_NEW_NUMERIC = 0
try:
    # NOTE(review): this assumes the version string parses as a float
    # (e.g. '23.1'); three-part versions like '23.8.2' raise ValueError
    # and are treated as old Numeric.
    val = float(Numeric.__version__)
except ValueError:
    _NEW_NUMERIC = 0
else:
    if val > 20.0:
        _NEW_NUMERIC = 1
    else:
        _NEW_NUMERIC = 0
class vtkImageImportFromArray:
    """A NumPy/Numeric front-end to vtkImageImport.

    Loads a Numeric Python array into the VTK imaging pipeline.  Use
    SetArray() to supply the data and GetOutput() to connect the result to
    downstream VTK filters.
    """

    def __init__(self):
        # Underlying VTK importer that feeds the pipeline.
        self.__import = vtkImageImport()
        # When true, 'i' (int) arrays are converted to VTK_UNSIGNED_SHORT.
        self.__ConvertIntToUnsignedShort = 0
        # Last array passed to SetArray(), kept for GetArray().
        self.__Array = None

    # type dictionary: note that python doesn't support
    # unsigned integers properly!
    __typeDict = {'c':VTK_UNSIGNED_CHAR,
                  'b':VTK_UNSIGNED_CHAR,
                  '1':VTK_CHAR,
                  's':VTK_SHORT,
                  'i':VTK_INT,
                  'l':VTK_LONG,
                  'f':VTK_FLOAT,
                  'd':VTK_DOUBLE,
                  'F':VTK_FLOAT,
                  'D':VTK_DOUBLE }

    # bytes per scalar for each VTK type, used to size the copied buffer
    __sizeDict = { VTK_CHAR:1,
                   VTK_UNSIGNED_CHAR:1,
                   VTK_SHORT:2,
                   VTK_UNSIGNED_SHORT:2,
                   VTK_INT:4,
                   VTK_LONG:4,
                   VTK_FLOAT:4,
                   VTK_DOUBLE:8 }

    # convert 'Int32' to 'unsigned short'
    # (python doesn't support unsigned short, so this might be necessary)
    def SetConvertIntToUnsignedShort(self, yesno):
        self.__ConvertIntToUnsignedShort = yesno

    def GetConvertIntToUnsignedShort(self):
        return self.__ConvertIntToUnsignedShort

    def ConvertIntToUnsignedShortOn(self):
        self.__ConvertIntToUnsignedShort = 1

    def ConvertIntToUnsignedShortOff(self):
        self.__ConvertIntToUnsignedShort = 0

    def GetOutput(self):
        """Return the vtkImageData output port of the importer."""
        return self.__import.GetOutput()

    def SetArray(self, imArray):
        """Import |imArray| (a Numeric array) into the VTK pipeline.

        A 4-D array is treated as a 3-D volume whose last axis is the
        per-voxel component count.  Complex arrays ('F'/'D') are imported
        as twice as many real components.
        """
        self.__Array = imArray
        numComponents = 1
        dim = imArray.shape
        if (len(dim) == 4):
            numComponents = dim[3]
            dim = (dim[0],dim[1],dim[2])
        scalar_type = self.__typeDict[imArray.typecode()]
        # BUG FIX: the original code compared the *bound method*
        # `imArray.typecode` (not its result) to 'D', which is always False,
        # so complex-double arrays never had their component count doubled.
        if (imArray.typecode() == 'F' or imArray.typecode() == 'D'):
            numComponents = numComponents * 2

        if (self.__ConvertIntToUnsignedShort and imArray.typecode() == 'i'):
            if _NEW_NUMERIC:
                imTmpArr = imArray.astype(Numeric.Int16).flat
            else:
                imTmpArr = imArray.astype(Numeric.Int16).tostring()
            scalar_type = VTK_UNSIGNED_SHORT
        else:
            if _NEW_NUMERIC:
                imTmpArr = imArray.flat
            else:
                imTmpArr = imArray.tostring()

        # NOTE(review): for the old-Numeric tostring() path len() is already a
        # byte count, so multiplying by the element size looks suspicious --
        # preserved as-is from the original; verify against old Numeric.
        size = len(imTmpArr)*self.__sizeDict[scalar_type]
        self.__import.CopyImportVoidPointer(imTmpArr, size)
        self.__import.SetDataScalarType(scalar_type)
        self.__import.SetNumberOfScalarComponents(numComponents)
        # Extend the current extent by the array dimensions (note the
        # z,y,x -> x,y,z axis reversal between Numeric and VTK).
        extent = self.__import.GetDataExtent()
        self.__import.SetDataExtent(extent[0],extent[0]+dim[2]-1,
                                    extent[2],extent[2]+dim[1]-1,
                                    extent[4],extent[4]+dim[0]-1)
        self.__import.SetWholeExtent(extent[0],extent[0]+dim[2]-1,
                                     extent[2],extent[2]+dim[1]-1,
                                     extent[4],extent[4]+dim[0]-1)

    def GetArray(self):
        return self.__Array

    # a whole bunch of methods copied from vtkImageImport

    def SetDataExtent(self, extent):
        self.__import.SetDataExtent(extent)

    def GetDataExtent(self):
        return self.__import.GetDataExtent()

    def SetDataSpacing(self, spacing):
        self.__import.SetDataSpacing(spacing)

    def GetDataSpacing(self):
        return self.__import.GetDataSpacing()

    def SetDataOrigin(self, origin):
        self.__import.SetDataOrigin(origin)

    def GetDataOrigin(self):
        return self.__import.GetDataOrigin()
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing table for the project.
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in
# 1.10 -- this file targets the old Django <= 1.7 string-view API.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'website.views.home', name='home'),
    # url(r'^website/', include('website.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),

    # Site home page; named so templates can reverse it via {% url 'home' %}.
    url(r'^$','learn.views.home',name='home'),
    #the rule of the name is so important
    # Calculator views; add2 captures two integer arguments from the URL path.
    url(r'^add/$','calc.views.add',name='add'),
    url(r'^add2/(\d+)/(\d+)/','calc.views.add2',name='add2'),
)
| fqc/django_test | website/website/urls.py | Python | mit | 787 |
#!/usr/bin/env python
import os
from setuptools import setup
def _read_text(path):
    """Return the contents of *path*, or '' if the file does not exist."""
    if not os.path.exists(path):
        return ""
    with open(path) as f:
        return f.read()


def _read_requirements(path):
    """Return the requirement lines from *path*.

    Unlike the previous inline ``open(...).read()`` expressions, these
    helpers close their file handles deterministically (no ResourceWarning).
    """
    with open(path) as f:
        return f.read().strip().split("\n")


setup(
    name="zict",
    version="2.1.0.dev3",
    description="Mutable mapping tools",
    url="http://zict.readthedocs.io/en/latest/",
    maintainer="Matthew Rocklin",
    maintainer_email="mrocklin@gmail.com",
    license="BSD",
    keywords="mutable mapping,dict,dask",
    packages=["zict"],
    install_requires=_read_requirements("requirements.txt"),
    long_description=_read_text("README.rst"),
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    zip_safe=False,
)
| dask/zict | setup.py | Python | bsd-3-clause | 829 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GTKGpxdata.py
#
# Copyright 2010-2015 Jose Riguera Lopez <jriguera@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Add-on for PhotoPlace to generate paths and waypoints from GPX tracks to show them in the KML layer.
"""
__program__ = "photoplace.gpxdata"
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.2.3"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) Jose Riguera Lopez"
import os.path
import warnings
import gettext
import locale
warnings.filterwarnings('ignore', module='gtk')
try:
import pygtk
pygtk.require("2.0")
import gtk
import gobject
except Exception as e:
warnings.resetwarnings()
print("Warning: %s" % str(e))
print("You don't have the PyGTK 2.0 module installed")
raise
warnings.resetwarnings()
from gpxdata import *
from PhotoPlace.UserInterface.GTKTemplateEditor import TemplateEditorGUI
from PhotoPlace.UserInterface.GTKUI import CellRendererTextClick
# I18N gettext support: install a module-level `_` translation function.
__GETTEXT_DOMAIN__ = __program__
__PACKAGE_DIR__ = os.path.abspath(os.path.dirname(__file__))
__LOCALE_DIR__ = os.path.join(__PACKAGE_DIR__, u"locale")
try:
    if not os.path.isdir(__LOCALE_DIR__):
        print ("Error: Cannot locate default locale dir: '%s'." % (__LOCALE_DIR__))
        __LOCALE_DIR__ = None
    locale.setlocale(locale.LC_ALL,"")
    #gettext.bindtextdomain(__GETTEXT_DOMAIN__, __LOCALE_DIR__)
    # fallback=False: a missing catalog raises, and we fall through to the
    # identity translation below instead of silently using NullTranslations.
    t = gettext.translation(__GETTEXT_DOMAIN__, __LOCALE_DIR__, fallback=False)
    _ = t.ugettext
except Exception as e:
    print ("Error setting up the translations: %s" % (str(e)))
    # Identity fallback (ugettext/unicode: this module is Python 2 code).
    _ = lambda s: unicode(s)
# columns: indexes into the gtk.TreeStore built by GTKGPXData (one constant
# per TreeStore column, in declaration order).
(
    _GTKGPXData_COLUMN_ID,
    _GTKGPXData_COLUMN_TYPE,
    _GTKGPXData_COLUMN_ACT,
    _GTKGPXData_COLUMN_VIS,
    _GTKGPXData_COLUMN_KEY,
    _GTKGPXData_COLUMN_VKEY,
    _GTKGPXData_COLUMN_VALUE,
    _GTKGPXData_COLUMN_EDITKEY,
    _GTKGPXData_COLUMN_EDITVAL,
    _GTKGPXData_COLUMN_CLICK,
    _GTKGPXData_COLUMN_TOOLTIP,
    _GTKGPXData_COLUMN_FAMILY,
) = range(12)
# Data type: kind of row stored in the tree (section title, photo path,
# GPX track, GPX waypoint).
(
    _GTKGPXData_TYPE_TITLE,
    _GTKGPXData_TYPE_PATH,
    _GTKGPXData_TYPE_TRACK,
    _GTKGPXData_TYPE_WPT,
) = range(4)
# 2 types for columns: whether a cell holds a key or a value.
(
    _GTKGPXData_COLUMN_TYPE_KEY,
    _GTKGPXData_COLUMN_TYPE_VALUE,
) = range(2)
# Max characters shown for a description cell before truncation.
_GTKGPXData_DESCRIPTION_CHARS = 45
# Font families used for normal vs description cells.
_GTKGPXData_DEFAULT_FAMILY = None
_GTKGPXData_DESC_FAMILY = "Monospace"
# Localized labels for the per-track attribute rows.
_GTKGPXData_TRACKS_WIDTH = _("Width")
_GTKGPXData_TRACKS_COLOR = _("Color")
class GTKGPXData(object):
    """GTK panel of the GPXData add-on.

    Builds a gtk.VBox holding three check buttons (generated paths, GPX
    tracks, GPX waypoints) plus a gtk.TreeView that lists the per-item
    editable parameters (name/description, color, width).
    """

    def __init__(self, gui, userfacade, logger):
        object.__init__(self)
        self.options = None
        self.goptions = None
        self.tracksinfo = None
        self.logger = logger
        self.userfacade = userfacade
        self.plugin = gtk.VBox(False)
        # 1st line: the three "Include in KML" check buttons. They start
        # insensitive; set_*_widget() enables them once data is available.
        hbox_checks = gtk.HBox(False)
        label = gtk.Label()
        label.set_markup(_("Include in KML: "))
        hbox_checks.pack_start(label, False, False, 5)
        self.checkbutton_genpath = gtk.CheckButton(_("Path from geottaged photos"))
        self.checkbutton_genpath.connect('toggled',
            self._toggled_button, _GTKGPXData_TYPE_PATH)
        self.checkbutton_genpath.set_sensitive(False)
        hbox_checks.pack_start(self.checkbutton_genpath, False, False, 5)
        self.checkbutton_gentrack = gtk.CheckButton(_("Tracks from GPX"))
        self.checkbutton_gentrack.connect('toggled',
            self._toggled_button, _GTKGPXData_TYPE_TRACK)
        self.checkbutton_gentrack.set_sensitive(False)
        hbox_checks.pack_start(self.checkbutton_gentrack, False, False, 8)
        self.checkbutton_genwpts = gtk.CheckButton(_("WayPoints from GPX"))
        self.checkbutton_genwpts.connect('toggled',
            self._toggled_button, _GTKGPXData_TYPE_WPT)
        self.checkbutton_genwpts.set_sensitive(False)
        hbox_checks.pack_start(self.checkbutton_genwpts, False, False, 5)
        self.plugin.pack_start(hbox_checks, False, False, 10)
        # Parameters
        scroll = gtk.ScrolledWindow()
        scroll.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        # create model for parameters; column order/types must match the
        # _GTKGPXData_COLUMN_* indices defined at module level.
        self.treestore = gtk.TreeStore(
            int, int, bool, bool, str, str, str, bool, bool, bool, str, str)
        treeview = gtk.TreeView(self.treestore)
        treeview.set_rules_hint(True)
        treeview.set_tooltip_column(_GTKGPXData_COLUMN_TOOLTIP)
        treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
        treeview.connect('button-press-event' , self._lclick_row)
        # columns: radio toggle, key/name (also the expander), value.
        renderer = gtk.CellRendererToggle()
        renderer.set_radio(True)
        renderer.connect('toggled', self._toggled_row)
        column = gtk.TreeViewColumn(None, renderer,
            active=_GTKGPXData_COLUMN_ACT,
            visible=_GTKGPXData_COLUMN_VIS)
        treeview.append_column(column)
        renderer = gtk.CellRendererText()
        renderer.connect('edited', self._edit_cell, _GTKGPXData_COLUMN_TYPE_KEY)
        column = gtk.TreeViewColumn(_("Path Name/Parameter"), renderer,
            text=_GTKGPXData_COLUMN_VKEY,
            editable=_GTKGPXData_COLUMN_EDITKEY)
        column.set_resizable(True)
        treeview.append_column(column)
        treeview.set_expander_column(column)
        # Custom renderer that can open a picker on click (used for colors).
        renderer = CellRendererTextClick()
        renderer.connect('editing-started', self._edit_property)
        renderer.connect('edited', self._edit_cell, _GTKGPXData_COLUMN_TYPE_VALUE)
        column = gtk.TreeViewColumn(_("Description/Value"), renderer,
            text=_GTKGPXData_COLUMN_VALUE,
            editable=_GTKGPXData_COLUMN_EDITVAL,
            clickable=_GTKGPXData_COLUMN_CLICK,
            family=_GTKGPXData_COLUMN_FAMILY)
        column.set_resizable(True)
        treeview.append_column(column)
        scroll.add(treeview)
        self.plugin.pack_start(scroll, True, True)
        self.windoweditor = TemplateEditorGUI()
        self.window = gui.builder.get_object("window")
    def show(self, widget=None, options=None, tracks=None, wpts=None, goptions=None):
        """Attach the panel to *widget* (if given), configure it from
        *options* (if given) and make all child widgets visible."""
        if widget:
            widget.add(self.plugin)
        if options:
            self.setup(options, tracks, wpts, goptions)
        self.plugin.show_all()

    def setup(self, options, tracks, wpts, goptions):
        """Store the add-on configuration dicts and rebuild the model."""
        self.tracksinfo = tracks
        self.wptsinfo = wpts
        self.options = options
        self.goptions = goptions
        self.reset()

    def hide(self, reset=False):
        """Hide the panel; with reset=True also clear the tree model."""
        self.plugin.hide_all()
        if reset:
            self.reset()
    def reset(self):
        """Clear the tree model and re-synchronize the three check buttons
        with the current option values."""
        self.treestore.clear()
        self.iterator_paths = None
        self.pathlist = {}
        self.pathstyles = {}
        self.iterator_tracks = None
        self.tracklist = {}
        self.trackstyles = {}
        self.iterator_wpts = None
        self.wptlist = {}
        self.wptstyles = {}
        # For each button: if the widget state differs from the option, one
        # set_active() is enough to emit 'toggled'; if it already matches,
        # toggle it off and (when the option is set) back on -- presumably to
        # force the 'toggled' handler to run anyway (TODO confirm intent).
        if self.checkbutton_genpath.get_active() != self.options[GPXData_CONFKEY_GENPATH]:
            self.checkbutton_genpath.set_active(self.options[GPXData_CONFKEY_GENPATH])
        else:
            tmp = self.options[GPXData_CONFKEY_GENPATH]
            self.checkbutton_genpath.set_active(False)
            if tmp:
                self.checkbutton_genpath.set_active(True)
        if self.checkbutton_gentrack.get_active() != self.options[GPXData_CONFKEY_GENTRACK]:
            self.checkbutton_gentrack.set_active(self.options[GPXData_CONFKEY_GENTRACK])
        else:
            tmp = self.options[GPXData_CONFKEY_GENTRACK]
            self.checkbutton_gentrack.set_active(False)
            if tmp:
                self.checkbutton_gentrack.set_active(True)
        if self.checkbutton_genwpts.get_active() != self.options[GPXData_CONFKEY_GENPOINTS]:
            self.checkbutton_genwpts.set_active(self.options[GPXData_CONFKEY_GENPOINTS])
        else:
            tmp = self.options[GPXData_CONFKEY_GENPOINTS]
            self.checkbutton_genwpts.set_active(False)
            if tmp:
                self.checkbutton_genwpts.set_active(True)
        # Buttons stay greyed out until set_*_widget() enables them again.
        self.checkbutton_genpath.set_sensitive(False)
        self.checkbutton_gentrack.set_sensitive(False)
        self.checkbutton_genwpts.set_sensitive(False)
    def _toggled_button(self, widget, data):
        """'toggled' handler for the three check buttons: store the new flag
        in the options dict and (re)build the matching tree section."""
        value = widget.get_active()
        if data == _GTKGPXData_TYPE_PATH:
            self.options[GPXData_CONFKEY_GENPATH] = value
            self.set_paths(value)
        elif data == _GTKGPXData_TYPE_TRACK:
            self.options[GPXData_CONFKEY_GENTRACK] = value
            self.set_tracks(value)
        elif data == _GTKGPXData_TYPE_WPT:
            self.options[GPXData_CONFKEY_GENPOINTS] = value
            self.set_wpts(value)

    def clear(self, iterator, remove=True):
        """Delete all children of *iterator* from the model, plus the row
        itself when *remove* is True. No-op when *iterator* is None."""
        if iterator == None:
            return
        # Collect child iters last-to-first, then remove them, so removal
        # never invalidates an iter we still need.
        deleted = []
        nchildren = self.treestore.iter_n_children(iterator)
        while nchildren > 0:
            nchildren -= 1
            ite = self.treestore.iter_nth_child(iterator, nchildren)
            deleted.append(ite)
        for ite in deleted:
            self.treestore.remove(ite)
        if remove:
            self.treestore.remove(iterator)
    def set_paths_widget(self, mode=True):
        """Enable/disable the 'paths' check button; when path generation is
        configured on, mirror *mode* into the button's active state."""
        self.checkbutton_genpath.set_sensitive(mode)
        if self.options[GPXData_CONFKEY_GENPATH]:
            self.checkbutton_genpath.set_active(mode)

    def set_tracks_widget(self, mode=True):
        """Same as set_paths_widget(), for the GPX tracks button."""
        self.checkbutton_gentrack.set_sensitive(mode)
        if self.options[GPXData_CONFKEY_GENTRACK]:
            self.checkbutton_gentrack.set_active(mode)

    def set_wpts_widget(self, mode=True):
        """Same as set_paths_widget(), for the waypoints button."""
        self.checkbutton_genwpts.set_sensitive(mode)
        if self.options[GPXData_CONFKEY_GENPOINTS]:
            self.checkbutton_genwpts.set_active(mode)

    def set_paths(self, mode=True):
        """Create (mode=True) or delete (mode=False) the top-level section
        for generated paths. Section rows use id -1."""
        if mode:
            tip = _("List of generated paths from geotagged photos")
            # NOTE: "geottaged" typo kept -- it is a translatable msgid and
            # changing it would orphan existing translations.
            name = _("Paths from geottaged photos")
            self.iterator_paths = self.treestore.append(None,
                [-1, _GTKGPXData_TYPE_PATH, False, False, '',
                name , '', False, False, False, tip, None])
            self.add_paths()
        else:
            self.clear(self.iterator_paths)
            self.iterator_paths = None
    def add_paths(self, pathlist=None, pathstyles=None):
        """Populate the 'paths' section, optionally replacing the cached
        path/style dicts first. Returns the number of rows added."""
        if pathlist != None and pathstyles != None:
            self.pathlist = pathlist
            self.pathstyles = pathstyles
        num = 0
        if self.options[GPXData_CONFKEY_GENPATH]:
            # Python 2 dict iteration; display order is unspecified.
            for path_id, path in self.pathlist.iteritems():
                style = self.pathstyles[path_id]
                self.add_track(self.iterator_paths, path_id, path, style, _GTKGPXData_TYPE_PATH)
                num += 1
        desc = _(" %d path(s)") % num
        self.treestore.set(self.iterator_paths, _GTKGPXData_COLUMN_VALUE, desc)
        return num

    def set_tracks(self, mode=True):
        """Create (mode=True) or delete (mode=False) the GPX tracks section."""
        if mode:
            tip = _("List of tracks read from GPX file")
            name = _("Tracks from GPS data")
            self.iterator_tracks = self.treestore.append(None,
                [-1, _GTKGPXData_TYPE_TRACK, False, False, '',
                name , '', False, False, False, tip, None])
            self.add_tracks()
        else:
            self.clear(self.iterator_tracks)
            self.iterator_tracks = None

    def add_tracks(self, tracklist=None, trackstyles=None):
        """Populate the tracks section; see add_paths() for the contract."""
        if tracklist != None and trackstyles != None:
            self.tracklist = tracklist
            self.trackstyles = trackstyles
        num = 0
        if self.options[GPXData_CONFKEY_GENTRACK]:
            for track_id, track in self.tracklist.iteritems():
                style = self.trackstyles[track_id]
                self.add_track(self.iterator_tracks, track_id, track, style)
                num += 1
        desc = _(" %d track(s)") % num
        self.treestore.set(self.iterator_tracks, _GTKGPXData_COLUMN_VALUE, desc)
        return num

    def set_wpts(self, mode=True):
        """Create (mode=True) or delete (mode=False) the waypoints section."""
        if mode:
            tip = _("List of WayPoints read from GPX file")
            name = _("WayPoints from GPS data")
            self.iterator_wpts = self.treestore.append(None,
                [-1, _GTKGPXData_TYPE_WPT, False, False, '',
                name , '', False, False, False, tip, None])
            self.add_wpts()
        else:
            self.clear(self.iterator_wpts)
            self.iterator_wpts = None

    def add_wpts(self, wptlist=None, wptstyles=None):
        """Populate the waypoints section; see add_paths() for the contract."""
        if wptlist != None and wptstyles != None:
            self.wptlist = wptlist
            self.wptstyles = wptstyles
        num = 0
        if self.options[GPXData_CONFKEY_GENPOINTS]:
            for wpt_id, wpt in self.wptlist.iteritems():
                style = self.wptstyles[wpt_id]
                self.add_wpt(self.iterator_wpts, wpt_id, wpt, style)
                num += 1
        desc = _(" %d waypoint(s)") % num
        self.treestore.set(self.iterator_wpts, _GTKGPXData_COLUMN_VALUE, desc)
        return num
    def add_track(self, iterator, track_id, track, style, kind=_GTKGPXData_TYPE_TRACK):
        """Append one track/path row under *iterator*, with 'Color' and
        'Width' child rows. Used for both GPX tracks and generated paths."""
        desc = track.desc
        name = track.name
        dgettext = dict()
        # Style values fall back to the add-on-wide defaults when the per-item
        # style dict has no override (py2 has_key).
        color = GPXData_TRACKS_COLOR
        if style.has_key(GPXData_CONFKEY_TRACKS_COLOR):
            color = style[GPXData_CONFKEY_TRACKS_COLOR]
        width = GPXData_TRACKS_WIDTH
        if style.has_key(GPXData_CONFKEY_TRACKS_WIDTH):
            width = style[GPXData_CONFKEY_TRACKS_WIDTH]
        dgettext['path_oname'] = track.attr['.orig-name']
        dgettext['path_id'] = track_id
        tooltip = _("Track #%(path_id)s (original name: %(path_oname)s)\n")
        try:
            # Track statistics may raise on degenerate tracks; the tooltip
            # then keeps only its first line (all %-keys appended below are
            # assigned before the corresponding format strings).
            (tmin, tmax, duration) = track.timeMinMaxDuration()
            (lmin, lmax, length) = track.lengthMinMaxTotal()
            dgettext['path_npoints'] = len(track.listpoints())
            dgettext['path_len'] = length
            dgettext['path_time'] = duration
            dgettext['path_end'] = tmax
            dgettext['path_begin'] = tmin
            tooltip += _("Points: %(path_npoints)s, length: %(path_len).3f m.\n")
            tooltip += _("Duration: %(path_time)s\n")
            tooltip += _("Begin time: %(path_begin)s\n")
            tooltip += _("Final time: %(path_end)s\n")
        except:
            pass
        tooltip = tooltip % dgettext
        default_tip = _("Double click to edit values ...")
        tip = tooltip + '\n' + default_tip
        status = bool(track.status)
        ite = self.treestore.append(iterator, [
            track_id, kind, status, True, GPXData_CONFKEY_TRACKS_NAME,
            name, desc, True, True, False, tip, _GTKGPXData_DEFAULT_FAMILY])
        # Color row is 'clickable' (True in the CLICK column) so the custom
        # renderer can open the color dialog; Width is plain text-editable.
        self.treestore.append(ite, [
            track_id, kind, None, False, GPXData_CONFKEY_TRACKS_COLOR,
            _GTKGPXData_TRACKS_COLOR, color, False, True, True,
            default_tip, _GTKGPXData_DEFAULT_FAMILY])
        self.treestore.append(ite, [
            track_id, kind, None, False, GPXData_CONFKEY_TRACKS_WIDTH,
            _GTKGPXData_TRACKS_WIDTH, width, False, True, False,
            default_tip, _GTKGPXData_DEFAULT_FAMILY])
    def add_wpt(self, iterator, wpt_id, wpt, style):
        """Append one waypoint row under *iterator* (no child rows)."""
        name = wpt.attr['name']
        desc = wpt.attr['desc']
        dgettext = dict()
        # NOTE(review): icon/scale are computed here but never used below,
        # and the appended iter `ite` is unused too -- looks like leftover
        # code from a fuller implementation (cf. add_track's child rows).
        icon = GPXData_WPT_ICON
        if style.has_key(GPXData_CONFKEY_WPT_ICON):
            icon = style[GPXData_CONFKEY_WPT_ICON]
        scale = GPXData_WPT_ICONSCALE
        if style.has_key(GPXData_CONFKEY_WPT_ICONSCALE):
            scale = style[GPXData_CONFKEY_WPT_ICONSCALE]
        dgettext['wpt_oname'] = wpt.attr['.orig-name']
        dgettext['wpt_id'] = wpt_id
        tooltip = _("WayPoint #%(wpt_id)s (original name: %(wpt_oname)s)\n")
        tooltip = tooltip % dgettext
        default_tip = _("Double click to edit values ...")
        tip = tooltip + '\n' + default_tip
        status = bool(wpt.status)
        ite = self.treestore.append(iterator, [
            wpt_id, _GTKGPXData_TYPE_WPT, status, True, GPXData_CONFKEY_TRACKS_NAME,
            name, desc, True, True, False, tip, None])
    def _lclick_row(self, widget, event):
        """'button-press-event' handler. Despite the '_lclick' name it
        reacts to button 3 (right click) and pops up the template menu."""
        if event.button == 3:
            paths_ite = widget.get_path_at_pos(int(event.x), int(event.y))
            if paths_ite == None:
                # invalid path
                pass
            elif len(paths_ite) > 0:
                treestore_iter = self.treestore.get_iter(paths_ite[0])
                key = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_KEY)
                obj_id = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ID)
                # Only item name rows and section headers (id == -1) get a menu.
                if key == GPXData_CONFKEY_TRACKS_NAME or obj_id == -1:
                    kind = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_TYPE)
                    style = None
                    menu = gtk.Menu()
                    if obj_id < 0:
                        # Section header: single "edit default template" entry.
                        msg = _("Edit main default template ...")
                        if kind == _GTKGPXData_TYPE_TRACK:
                            msg = _("Edit main default template for tracks")
                        elif kind == _GTKGPXData_TYPE_PATH:
                            msg = _("Edit main default template for generated paths")
                        elif kind == _GTKGPXData_TYPE_WPT:
                            msg = _("Edit main default template for waypoints")
                        menu_edit = gtk.MenuItem(msg)
                        menu.append(menu_edit)
                    else:
                        # Item row: edit entry plus a "Default Template" check
                        # that is active when no custom description is stored.
                        active = True
                        if kind == _GTKGPXData_TYPE_TRACK:
                            style = self.trackstyles[obj_id]
                            if style.has_key(GPXData_CONFKEY_TRACKS_DESC) \
                                and style[GPXData_CONFKEY_TRACKS_DESC]:
                                active = False
                        elif kind == _GTKGPXData_TYPE_PATH:
                            style = self.pathstyles[obj_id]
                            if style.has_key(GPXData_CONFKEY_TRACKS_DESC) \
                                and style[GPXData_CONFKEY_TRACKS_DESC]:
                                active = False
                        elif kind == _GTKGPXData_TYPE_WPT:
                            style = self.wptstyles[obj_id]
                            if style.has_key(GPXData_CONFKEY_WPT_DESC) \
                                and style[GPXData_CONFKEY_WPT_DESC]:
                                active = False
                        menu_edit = gtk.MenuItem(_("Edit Template/Description"))
                        menu_default = gtk.CheckMenuItem(_("Default Template"))
                        menu_default.set_active(active)
                        menu.append(menu_edit)
                        menu.append(menu_default)
                        menu_default.connect("activate", self._activate_setdesc, style, kind, obj_id)
                    menu_edit.connect("activate", self._activate_menuedit, treestore_iter, style, kind, obj_id)
                    menu.show_all()
                    menu.popup(None, None, None, event.button, event.time)
    def _activate_menuedit(self, widget, iterator, style, kind, obj_id):
        """Open the template editor for the clicked item, or for the global
        defaults of its kind when the menu came from a section header
        (style is None in that case)."""
        # Tooltip markup and autocompletion entries depend on which template
        # variables are valid for this item kind.
        if kind == _GTKGPXData_TYPE_WPT:
            tooltip = _("\n<span font_family='monospace' size='small'>"
                "<b>%(PhotoPlace.WptNAME)s</b> -> WayPoint name\n"
                "<b>%(PhotoPlace.WptDESC)s</b> -> Description\n"
                "<b>%(PhotoPlace.WptLAT)s</b> -> Latitude\n"
                "<b>%(PhotoPlace.WptLON)s</b> -> Longitude\n"
                "<b>%(PhotoPlace.WptELE)s</b> -> Altitude\n"
                "<b>%(PhotoPlace.WptTIME)s</b> -> UTC Time\n"
                "</span>")
            autocompletions = [
                PhotoPlace_WptNAME,
                PhotoPlace_WptDESC,
                PhotoPlace_WptLAT,
                PhotoPlace_WptLON,
                PhotoPlace_WptELE,
                PhotoPlace_WptTIME,
            ]
        else:
            tooltip = _("\n<span font_family='monospace' size='small'>"
                "<b>%(PhotoPlace.PathNAME)s</b> -> Path name\n"
                "<b>%(PhotoPlace.PathDESC)s</b> -> Path description\n"
                "<b>%(PhotoPlace.PathTINI)s</b> -> Initial time (first point)\n"
                "<b>%(PhotoPlace.PathTEND)s</b> -> End time (last point)\n"
                "<b>%(PhotoPlace.PathDRTN)s</b> -> Duration\n"
                "<b>%(PhotoPlace.PathLEN)s</b> -> Length (in meters)\n"
                "<b>%(PhotoPlace.PathLENMIN)s</b> -> Minimum length\n"
                "<b>%(PhotoPlace.PathLENMAX)s</b> -> Maximum length\n"
                "<b>%(PhotoPlace.PathSPMIN)s</b> -> Minimum speed (m/s)\n"
                "<b>%(PhotoPlace.PathSPMAX)s</b> -> Maximum speed (m/s)\n"
                "<b>%(PhotoPlace.PathSPAVG)s</b> -> Average speed (m/s)\n"
                "<b>%(PhotoPlace.PathNSEG)s</b> -> Number of segments\n"
                "<b>%(PhotoPlace.PathNWPT)s</b> -> Number of waypoints\n"
                "</span>")
            autocompletions = [
                PhotoPlace_PathNAME,
                PhotoPlace_PathDESC,
                PhotoPlace_PathTINI,
                PhotoPlace_PathTEND,
                PhotoPlace_PathDRTN,
                PhotoPlace_PathLEN,
                PhotoPlace_PathLENMIN,
                PhotoPlace_PathLENMAX,
                PhotoPlace_PathSPMIN,
                PhotoPlace_PathSPMAX,
                PhotoPlace_PathSPAVG,
                PhotoPlace_PathNSEG,
                PhotoPlace_PathNWPT,
            ]
        completions = []
        for item in autocompletions:
            completions.append("%(" + item + ")s")
        text = ''
        filename = None
        if style != None:
            # Per-item edit: take current text/template file from the style.
            if kind == _GTKGPXData_TYPE_WPT:
                if style.has_key(GPXData_CONFKEY_WPT_DESC):
                    text = style[GPXData_CONFKEY_WPT_DESC]
                if style.has_key(GPXData_CONFKEY_WPT_FTEMPLATE):
                    filename = style[GPXData_CONFKEY_WPT_FTEMPLATE]
            elif kind == _GTKGPXData_TYPE_TRACK:
                if style.has_key(GPXData_CONFKEY_TRACKS_DESC):
                    text = style[GPXData_CONFKEY_TRACKS_DESC]
                if style.has_key(GPXData_CONFKEY_TRACKS_FTEMPLATE):
                    filename = style[GPXData_CONFKEY_TRACKS_FTEMPLATE]
            else:
                if style.has_key(GPXData_CONFKEY_TRACKS_DESC):
                    text = style[GPXData_CONFKEY_TRACKS_DESC]
            if text == None:
                # No custom description stored: fall back to the default one.
                text = self.tracksinfo[0][GPXData_CONFKEY_TRACKS_DESC]
            self.windoweditor.show(text=text, template=filename, recover=filename,
                completions=completions, tooltip=tooltip, cansave=False)
            self.windoweditor.connect('close', self._editor_setdesc, style, kind, obj_id)
        else:
            # Section header: edit the default template for the whole kind.
            cansave = True
            if kind == _GTKGPXData_TYPE_PATH:
                filename = self.tracksinfo[0][GPXData_CONFKEY_TRACKS_FTEMPLATE]
                text = self.tracksinfo[0][GPXData_CONFKEY_TRACKS_DESC]
                style = self.tracksinfo[0]
                cansave = False
            elif kind == _GTKGPXData_TYPE_TRACK:
                filename = self.tracksinfo[1][GPXData_CONFKEY_TRACKS_FTEMPLATE]
                text = self.tracksinfo[1][GPXData_CONFKEY_TRACKS_DESC]
                style = self.tracksinfo[1]
            elif kind == _GTKGPXData_TYPE_WPT:
                filename = self.wptsinfo[0][GPXData_CONFKEY_WPT_FTEMPLATE]
                text = self.wptsinfo[0][GPXData_CONFKEY_WPT_DESC]
                style = self.wptsinfo[0]
            # NOTE(review): when filename is None but text is truthy, this
            # calls os.path.basename(None) and raises TypeError -- looks like
            # the condition should be `if filename:` only. Confirm upstream.
            if filename or text:
                filename = os.path.basename(filename)
            self.windoweditor.show(text=text, template=filename,
                completions=completions, tooltip=tooltip, cansave=cansave)
            self.windoweditor.connect('close', self._editor_setdesc, style, kind, obj_id)
def _editor_setdesc(self, obj, text, template, style, kind, obj_id):
if kind == _GTKGPXData_TYPE_WPT:
style[GPXData_CONFKEY_WPT_DESC] = text
else:
style[GPXData_CONFKEY_TRACKS_DESC] = text
def _activate_setdesc(self, widget, style, kind, obj_id, text=None):
if kind == _GTKGPXData_TYPE_WPT:
style[GPXData_CONFKEY_WPT_DESC] = text
elif kind == _GTKGPXData_TYPE_PATH:
style[GPXData_CONFKEY_TRACKS_DESC] = text
else:
style[GPXData_CONFKEY_TRACKS_DESC] = text
    def _toggled_row(self, widget, path_string):
        """Radio-toggle handler: flip the row's active flag and mirror it
        into the underlying object's `status` attribute (as 0/1)."""
        treestore_iter = self.treestore.get_iter_from_string(path_string)
        obj_id = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ID)
        kind = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_TYPE)
        # Section headers use id -1 and are not toggleable.
        if obj_id >= 0:
            value = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ACT)
            value = not value
            self.treestore.set(treestore_iter, _GTKGPXData_COLUMN_ACT, value)
            if kind == _GTKGPXData_TYPE_WPT:
                wpt = self.wptlist[obj_id]
                wpt.status = int(value)
            elif kind == _GTKGPXData_TYPE_PATH:
                path = self.pathlist[obj_id]
                path.status = int(value)
            elif kind == _GTKGPXData_TYPE_TRACK:
                track = self.tracklist[obj_id]
                track.status = int(value)
    def _edit_cell(self, cell, path_string, new_text, column):
        """'edited' handler for both text columns.

        *column* says which side was edited: _GTKGPXData_COLUMN_TYPE_VALUE
        edits change the description or width, _GTKGPXData_COLUMN_TYPE_KEY
        edits rename the item.
        """
        treestore_iter = self.treestore.get_iter_from_string(path_string)
        key = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_KEY)
        if column == _GTKGPXData_COLUMN_TYPE_VALUE:
            obj_id = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ID)
            kind = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_TYPE)
            if key == GPXData_CONFKEY_TRACKS_WIDTH:
                try:
                    # int() only validates the input; the width is stored as
                    # the original *string*. Non-numeric input is silently
                    # rejected by the bare except below.
                    new = int(new_text)
                    self.treestore.set(treestore_iter, _GTKGPXData_COLUMN_VALUE, new_text)
                    if kind == _GTKGPXData_TYPE_PATH:
                        self.pathstyles[obj_id][GPXData_CONFKEY_TRACKS_WIDTH] = new_text
                    else:
                        self.trackstyles[obj_id][GPXData_CONFKEY_TRACKS_WIDTH] = new_text
                except:
                    pass
            elif key == GPXData_CONFKEY_TRACKS_NAME:
                # The value cell of a 'name' row holds the item description.
                if kind == _GTKGPXData_TYPE_WPT:
                    wpt = self.wptlist[obj_id]
                    wpt.attr['desc'] = str(new_text)
                elif kind == _GTKGPXData_TYPE_PATH:
                    path = self.pathlist[obj_id]
                    path.desc = str(new_text)
                elif kind == _GTKGPXData_TYPE_TRACK:
                    track = self.tracklist[obj_id]
                    track.desc = str(new_text)
                self.treestore.set(treestore_iter, _GTKGPXData_COLUMN_VALUE, new_text)
        else:
            if key == GPXData_CONFKEY_TRACKS_NAME:
                # Key-column edit renames the item itself.
                old_name = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_VKEY)
                obj_id = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ID)
                kind = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_TYPE)
                if kind == _GTKGPXData_TYPE_WPT:
                    wpt = self.wptlist[obj_id]
                    wpt.attr['name'] = str(new_text)
                elif kind == _GTKGPXData_TYPE_PATH:
                    path = self.pathlist[obj_id]
                    path.name = str(new_text)
                elif kind == _GTKGPXData_TYPE_TRACK:
                    track = self.tracklist[obj_id]
                    track.name = str(new_text)
                self.treestore.set(treestore_iter, _GTKGPXData_COLUMN_VKEY, new_text)
    def _edit_property(self, cellrenderer, editable, path_string):
        """'editing-started' handler for the value cell: for track/path
        Color rows it opens the color chooser instead of inline editing."""
        treestore_iter = self.treestore.get_iter_from_string(path_string)
        key = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_KEY)
        obj_id = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_ID)
        kind = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_TYPE)
        # Waypoints and section headers (id < 0) have no color property.
        if kind == _GTKGPXData_TYPE_WPT or obj_id < 0:
            return
        if key == GPXData_CONFKEY_TRACKS_COLOR:
            value = self.treestore.get_value(treestore_iter, _GTKGPXData_COLUMN_VALUE)
            color = self._show_color(value)
            if color:
                self.treestore.set(treestore_iter, _GTKGPXData_COLUMN_VALUE, color)
                if kind == _GTKGPXData_TYPE_PATH:
                    self.pathstyles[obj_id][GPXData_CONFKEY_TRACKS_COLOR] = color
                elif kind == _GTKGPXData_TYPE_TRACK:
                    self.trackstyles[obj_id][GPXData_CONFKEY_TRACKS_COLOR] = color
    def _show_color(self, value, title=_("Select a color for path ...")):
        """Run a color-selection dialog seeded from *value* and return the
        chosen color, or None if the dialog was cancelled.

        Colors are 8 hex digits in AABBGGRR order (alpha, blue, green,
        red), as used by KML; GTK channels are 16-bit, hence the
        255<->65535 scaling in both directions.
        """
        dialog = gtk.ColorSelectionDialog(title)
        colorsel = dialog.get_color_selection()
        colorsel.set_has_opacity_control(True)
        try:
            # Seed the dialog from the current value; malformed input just
            # leaves the dialog at its defaults (bare except is deliberate).
            colorsel.set_current_alpha(int((int(value[0:2], 16)*65535)/255))
            colorsel.set_current_color(gtk.gdk.Color(
                red=int((int(value[6:8], 16)*65535)/255),
                green=int((int(value[4:6], 16)*65535)/255),
                blue=int((int(value[2:4], 16)*65535)/255) ))
        except:
            pass
        color_str = None
        if dialog.run() == gtk.RESPONSE_OK:
            color = colorsel.get_current_color()
            alpha = colorsel.get_current_alpha()
            # Convert back to the 8-bit-per-channel AABBGGRR hex string.
            color_str = "%.2X%.2X%.2X%.2X" % (
                int((alpha * 255)/65535),
                int((color.blue * 255)/65535),
                int((color.green * 255)/65535),
                int((color.red * 255)/65535))
        dialog.destroy()
        return color_str
#EOF
| jriguera/photoplace | photoplace/addons/GPXData/GTKGpxdata.py | Python | apache-2.0 | 30,401 |
from plex import Plex
# Example: print the items most recently added to the Plex library,
# one per line as "[type] title" (Python 2 print statement).
for item in Plex['library'].recently_added():
    print '[%s] %s' % (item.type, item.title)
| fuzeman/plex.py | examples/recently_added.py | Python | mit | 116 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializes an AdWordsClient without using yaml-cached credentials.
While our LoadFromStorage method provides a useful shortcut to instantiate a
client if you regularly use just one set of credentials, production applications
may need to swap out users. This example shows you how to create an OAuth 2.0
client and an AdWordsClient without relying on a yaml file.
"""
__author__ = 'Joseph DiLallo'
from googleads import adwords
from googleads import oauth2
# OAuth 2.0 credential information. In a real application, you'd probably be
# pulling these values from a credential storage.
CLIENT_ID = 'INSERT_CLIENT_ID_HERE'
CLIENT_SECRET = 'INSERT_CLIENT_SECRET_HERE'
REFRESH_TOKEN = 'INSERT_REFRESH_TOKEN_HERE'
# AdWords API information.
DEVELOPER_TOKEN = 'INSERT_DEVELOPER_TOKEN_HERE'
USER_AGENT = 'INSERT_USER_AGENT_HERE'
CLIENT_CUSTOMER_ID = 'INSERT_CLIENT_CUSTOMER_ID_HERE'
def main(client_id, client_secret, refresh_token, developer_token, user_agent,
         client_customer_id):
  """Build an AdWordsClient directly from OAuth2 credentials (no yaml
  storage) and print the id of the authenticated customer."""
  oauth2_client = oauth2.GoogleRefreshTokenClient(
      client_id, client_secret, refresh_token)
  adwords_client = adwords.AdWordsClient(
      developer_token, oauth2_client, user_agent, client_customer_id)
  # CustomerService.get() with no selector returns the caller's own account.
  customer = adwords_client.GetService('CustomerService').get()
  print 'You are logged in as customer: %s' % customer['customerId']


if __name__ == '__main__':
  main(CLIENT_ID, CLIENT_SECRET, REFRESH_TOKEN, DEVELOPER_TOKEN, USER_AGENT,
       CLIENT_CUSTOMER_ID)
| coxmediagroup/googleads-python-lib | examples/adwords/authentication/create_adwords_client_without_yaml.py | Python | apache-2.0 | 2,090 |
'''
Test cases for pyclbr.py
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import sys
from types import ClassType, FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase
# Types of the staticmethod/classmethod descriptor objects themselves;
# used by checkModule()'s ismethod() to classify class attributes.
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Silence Py3k warning
import_module('commands', deprecated=True)
# This next line triggers an error on old versions of pyclbr.
from commands import getstatus
# Here we test the python class browser code.
#
# The main function in this suite, 'testModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), testModule is called with a set of
# members to ignore.
class PyclbrTest(TestCase):
    """Compare pyclbr.readmodule_ex() output with real module introspection.

    Because pyclbr is imperfect by design, every check accepts an *ignore*
    collection of member names that are excluded from the comparison.
    """

    def assertListEq(self, l1, l2, ignore):
        ''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
        missing = (set(l1) ^ set(l2)) - set(ignore)
        if missing:
            print >>sys.stderr, "l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore)
            self.fail("%r missing" % missing.pop())

    def assertHasattr(self, obj, attr, ignore):
        ''' succeed iff hasattr(obj,attr) or attr in ignore. '''
        if attr in ignore: return
        if not hasattr(obj, attr): print "???", attr
        self.assertTrue(hasattr(obj, attr),
                        'expected hasattr(%r, %r)' % (obj, attr))

    def assertHaskey(self, obj, key, ignore):
        ''' succeed iff key in obj or key in ignore. '''
        if key in ignore: return
        if key not in obj:
            print >>sys.stderr, "***", key
        self.assertIn(key, obj)

    def assertEqualsOrIgnored(self, a, b, ignore):
        ''' succeed iff a == b or a in ignore or b in ignore '''
        if a not in ignore and b not in ignore:
            self.assertEqual(a, b)

    def checkModule(self, moduleName, module=None, ignore=()):
        ''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
        to the actual module object, module. Any identifiers in
        ignore are ignored. If no module is provided, the appropriate
        module is loaded with __import__.'''

        if module is None:
            # Import it.
            # ('<silly>' is to work around an API silliness in __import__)
            module = __import__(moduleName, globals(), {}, ['<silly>'])

        # NOTE: `dict` deliberately shadows the builtin here (historic code).
        dict = pyclbr.readmodule_ex(moduleName)

        def ismethod(oclass, obj, name):
            # True iff *obj* is (some flavor of) method named *name* on
            # *oclass*: plain, static, or class method, including
            # name-mangled private methods.
            classdict = oclass.__dict__
            if isinstance(obj, FunctionType):
                if not isinstance(classdict[name], StaticMethodType):
                    return False
            else:
                if not  isinstance(obj, MethodType):
                    return False
                if obj.im_self is not None:
                    if (not isinstance(classdict[name], ClassMethodType) or
                        obj.im_self is not oclass):
                        return False
                else:
                    if not isinstance(classdict[name], FunctionType):
                        return False

            objname = obj.__name__
            if objname.startswith("__") and not objname.endswith("__"):
                objname = "_%s%s" % (obj.im_class.__name__, objname)
            return objname == name

        # Make sure the toplevel functions and classes are the same.
        for name, value in dict.items():
            if name in ignore:
                continue
            self.assertHasattr(module, name, ignore)
            py_item = getattr(module, name)
            if isinstance(value, pyclbr.Function):
                self.assertIsInstance(py_item, (FunctionType, BuiltinFunctionType))
                if py_item.__module__ != moduleName:
                    continue   # skip functions that came from somewhere else
                self.assertEqual(py_item.__module__, value.module)
            else:
                self.assertIsInstance(py_item, (ClassType, type))
                if py_item.__module__ != moduleName:
                    continue   # skip classes that came from somewhere else
                real_bases = [base.__name__ for base in py_item.__bases__]
                pyclbr_bases = [ getattr(base, 'name', base)
                                 for base in value.super ]
                try:
                    self.assertListEq(real_bases, pyclbr_bases, ignore)
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise
                actualMethods = []
                for m in py_item.__dict__.keys():
                    if ismethod(py_item, getattr(py_item, m), m):
                        actualMethods.append(m)
                foundMethods = []
                for m in value.methods.keys():
                    if m[:2] == '__' and m[-2:] != '__':
                        # Undo pyclbr's view of name mangling.
                        foundMethods.append('_'+name+m)
                    else:
                        foundMethods.append(m)
                try:
                    self.assertListEq(foundMethods, actualMethods, ignore)
                    self.assertEqual(py_item.__module__, value.module)
                    self.assertEqualsOrIgnored(py_item.__name__, value.name,
                                               ignore)
                    # can't check file or lineno
                except:
                    print >>sys.stderr, "class=%s" % py_item
                    raise

        # Now check for missing stuff.
        def defined_in(item, module):
            if isinstance(item, ClassType):
                return item.__module__ == module.__name__
            if isinstance(item, FunctionType):
                return item.func_globals is module.__dict__
            return False
        for name in dir(module):
            item = getattr(module, name)
            if isinstance(item, (ClassType, FunctionType)):
                if defined_in(item, module):
                    self.assertHaskey(dict, name, ignore)

    def test_easy(self):
        self.checkModule('pyclbr')
        self.checkModule('doctest', ignore=("DocTestCase",))
        # Silence Py3k warning
        rfc822 = import_module('rfc822', deprecated=True)
        self.checkModule('rfc822', rfc822)
        self.checkModule('difflib')

    def test_decorators(self):
        # XXX: See comment in pyclbr_input.py for a test that would fail
        #  if it were not commented out.
        #
        self.checkModule('test.pyclbr_input')

    def test_others(self):
        cm = self.checkModule

        # These were once about the 10 longest modules
        cm('random', ignore=('Random',))  # from _random import Random as CoreGenerator
        cm('cgi', ignore=('log',))      # set with = in module
        cm('urllib', ignore=('_CFNumberToInt32',
                             '_CStringFromCFString',
                             '_CFSetup',
                             'getproxies_registry',
                             'proxy_bypass_registry',
                             'proxy_bypass_macosx_sysconf',
                             'open_https',
                             'getproxies_macosx_sysconf',
                             'getproxies_internetconfig',))  # not on all platforms
        cm('pickle')
        cm('aifc', ignore=('openfp',))  # set with = in module
        cm('Cookie')
        cm('sre_parse', ignore=('dump',))  # from sre_constants import *
        cm('pdb')
        cm('pydoc')

        # Tests for modules inside packages
        cm('email.parser')
        cm('test.test_pyclbr')

    def test_issue_14798(self):
        # test ImportError is raised when the first part of a dotted name is
        # not a package
        self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo')
def test_main():
    # Standard test_support entry point for this test module.
    run_unittest(PyclbrTest)


if __name__ == "__main__":
    test_main()
| teeple/pns_server | work/install/Python-2.7.4/Lib/test/test_pyclbr.py | Python | gpl-2.0 | 7,898 |
# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re
def Email(mail, notEmpty=True):
    """Do some basic validation of an e-mail address.

    Return True if *mail* looks like ``user@domain.tld``, False otherwise.
    Surrounding whitespace is stripped before validation.

    If notEmpty is True (the default) an empty value ("" or None) is
    rejected; otherwise an empty value is accepted as valid.
    """
    usernameRE = re.compile(r"^[^ \t\n\r@<>()]+$", re.I)
    domainRE = re.compile(r"^[a-z0-9][a-z0-9\.\-_]*\.[a-z]+$", re.I)

    if not mail:
        # Empty/None input: validity depends solely on the notEmpty flag.
        # (The original also tested `mail is None`, which `not mail`
        # already covers.)
        return notEmpty is not True

    mail = mail.strip()
    parts = mail.split('@', 1)
    if len(parts) != 2:
        # No '@' separator at all.
        return False
    username, domain = parts

    if not usernameRE.search(username):
        return False
    if not domainRE.search(domain):
        return False

    return True
def Plain(text, notEmpty=False, allowSpaces=True):
    """Do some basic validation of a plain-text field.

    Return True when *text* contains only letters, digits, underscore,
    hyphen, apostrophe and (when allowSpaces is True) spaces.

    An empty or whitespace-only value (including None) is accepted unless
    notEmpty is True.
    """
    if text is None or not text.strip():
        return notEmpty is not True
    pattern = r"^[a-zA-Z_\-0-9\'\ ]*$" if allowSpaces else r"^[a-zA-Z_\-0-9\']*$"
    return re.compile(pattern).search(text) is not None
def String(text, notEmpty=False):
    """A string type. This is much looser in what it allows than Plain.

    Any content is accepted; the only failure mode is an empty or
    whitespace-only value (including None) combined with notEmpty=True.
    """
    blank = text is None or not text.strip()
    if blank and notEmpty is True:
        return False
    return True
def Path(text, notEmpty=False):
    """Do some basic validation of a filesystem path.

    Return True when *text* consists only of letters, digits, underscore,
    hyphen, backslash, space, dot, slash and colon. An empty value
    (including None) is accepted unless notEmpty is True.
    """
    allowed = re.compile(r"^[a-zA-Z_\-0-9\\ \.\/\\:]*$")
    if not text:
        if notEmpty is True:
            return False
        if text is None:
            return True
        # An empty string with notEmpty=False falls through and trivially
        # matches the pattern below.
    return allowed.search(text) is not None
def GoodName(text, notEmpty=False):
    """Validate a user/group name following shadow-utils.

    Names must match the gnu e-regex::

        [a-zA-Z0-9_.][a-zA-Z0-9_.-]{0,30}[a-zA-Z0-9_.$-]?

    As a non-POSIX extension, "$" is allowed as the last character for the
    sake of Samba 3.x "add machine script". A None value is accepted unless
    notEmpty is True; an empty string never matches the pattern.
    """
    nameRE = re.compile(r"^[a-zA-Z0-9_.][a-zA-Z0-9_.-]{0,30}[a-zA-Z0-9_.$-]?$")
    if not text:
        if notEmpty is True:
            return False
        if text is None:
            return True
    match = nameRE.match(text)
    if match is None:
        return False
    # `$` also matches just before a trailing newline, so additionally
    # require that the match covers the entire input.
    return match.group(0) == text
| tbabej/freeipa | ipapython/ipavalidate.py | Python | gpl-3.0 | 3,633 |
# encoding: utf-8
from __future__ import print_function
from yade import pack,export,plot
import math,os,sys
print('checkColliderConstantness for InsertionSortCollider')
#### This is useful for printing the linenumber in the script
# import inspect
# print inspect.currentframe().f_lineno
# Reproducibility requires a single thread/core: multithreaded runs are not
# bit-for-bit deterministic, which would break the reference comparison below.
if((opts.threads != None and opts.threads != 1) or (opts.cores != None and opts.cores != '1')):
    raise YadeCheckError("This test will only work on single core, because it must be fully reproducible, but -j "+str(opts.threads)+" or --cores "+str(opts.cores)+" is used.")
from yade import pack
# I had a third O.run( 500, True); and so there was
# [None,None,None] below, but I decided that it is too much testing.
# Collected collider bound dumps, keyed by periodicity flag; two snapshots each.
results={True:[None,None],False:[None,None]}
#checksPath="." # this line was used for working on this script locally.
# Build the identical scene for the periodic and the aperiodic case and
# snapshot the collider's sorted bounds after 500 and then 1500 iterations.
for usePeriod in [True,False]:
    O.periodic=usePeriod
    length=1.0
    height=1.0
    width=1.0
    thickness=0.1
    if(usePeriod):
        O.cell.hSize=Matrix3(length, 0, 0, 0 ,3.*height, 0, 0, 0, width)
    O.materials.append(FrictMat(density=1,young=1e5,poisson=0.3,frictionAngle=radians(30),label='boxMat'))
    # Flat bottom box the spheres fall onto (deliberately much larger than the cell).
    lowBox = box( center=(length/2.0,thickness*0.6,width/2.0), extents=(length*2.0,thickness/2.0,width*2.0) ,fixed=True,wire=False)
    O.bodies.append(lowBox)
    radius=0.01
    O.materials.append(FrictMat(density=1000,young=1e4,poisson=0.3,frictionAngle=radians(30),label='sphereMat'))
    sp=pack.SpherePack()
    #sp.makeCloud((0.*length,height+1.2*radius,0.25*width),(0.5*length,2*height-1.2*radius,0.75*width),-1,.2,2000,periodic=True)
    sp.load(checksPath+'/data/100spheres')
    # 100 was not enough to have reasonable number of collisions, so I put 200 spheres.
    O.bodies.append([sphere(s[0]+Vector3(0.0,0.2,0.0),s[1]) for s in sp])
    O.bodies.append([sphere(s[0]+Vector3(0.1,0.3,0.0),s[1]) for s in sp])
    O.dt=5e-4
    O.usesTimeStepper=False
    newton=NewtonIntegrator(damping=0.6,gravity=(0,-10,0))
    O.engines=[
        ForceResetter(),
        #(1) This is where we allow big bodies, else it would crash due to the very large bottom box:
        InsertionSortCollider([Bo1_Box_Aabb(),Bo1_Sphere_Aabb()],allowBiggerThanPeriod=True,targetInterv=50),
        InteractionLoop(
            [Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
            [Ip2_FrictMat_FrictMat_FrictPhys()],
            [Law2_ScGeom_FrictPhys_CundallStrack()]
        ),
        newton
    ]
    testedCollider=typedEngine("InsertionSortCollider")
    O.run( 500, True); results[usePeriod][0]=testedCollider.dumpBounds()
    O.run(1000, True); results[usePeriod][1]=testedCollider.dumpBounds()
    #O.run( 500, True); results[usePeriod][2]=testedCollider.dumpBounds()
    O.reset()
#### these text files have too high precision, and get too big. I think that 8 decimal places should be good to avoid any numerical errors arising on different architectures.
# textFile=open("Output123___n.txt", "w");textFile.write(str([results[False][0],results[False][1],results[False][2]]));textFile.close()
# textFile=open("Output123___p.txt", "w");textFile.write(str([results[True ][0],results[True ][1],results[True ][2]]));textFile.close()
resultFile=None
# careful, I used this loop to save the reference results in git revision 2bc5ac90b. When doing tests it must be readonly, and loading=True
loading=True
if(loading):
    resultFile=open( checksPath+'/data/checkColider.txt', "r" )
else:
    resultFile=open( checksPath+'/data/checkColider.txt', "w" )
lineCount=0
# Walk every number of every dumped bound tuple in a deterministic order and
# compare it against (loading=True) or write it into the reference file.
for per in sorted(results):
    for result in results[per]:
        for record in result:
            for tupl in record:
                # contents of this tuple is explained in file InsertionSortCollider.cpp line 518, function boost::python::tuple InsertionSortCollider::dumpBounds();
                for number in tupl:
                    lineCount+=1
                    if(loading):
                        line = resultFile.readline()
                        tmp = float(line)
                        # tolerance mirrors the 8 decimal places stored below
                        if(abs(tmp - number) > 1e-8):
                            raise YadeCheckError("InsertionSortCollider check failed in file scripts/checks-and-tests/checks/data/checkColider.txt line: %d"%lineCount)
                    else:
                        if(type(number) is int):
                            resultFile.write(str(number)+'\n')
                        else:
                            resultFile.write("%.8f"%number+'\n')
| cosurgi/trunk | scripts/checks-and-tests/checks/checkColliderConstantness.py | Python | gpl-2.0 | 4,071 |
import base
import unittest
class Test( base.BaseScriptTest, unittest.TestCase ):
    """Functional test for scripts/line_select.py: keep the stdin lines whose
    corresponding flag in the features file is 1.
    NOTE(review): the features file holds 6 flags but stdin only 5 lines, and
    the expected output is 'b'/'e' -- confirm how line_select.py pairs them.
    """
    command_line = "./scripts/line_select.py ${features}"
    input_features = base.TestFile( """0
1
1
0
1
0""" )
    input_stdin = base.TestFile( """a
b
d
e
f""" )
    output_stdout = base.TestFile( """b
e""" )
| uhjish/bx-python | script_tests/line_select_tests.py | Python | mit | 674 |
from setuptools import setup, find_packages
# Packaging metadata for the code shared between Pulp's OSTree-support
# server and client components.
setup(
    name='pulp_ostree_common',
    version='1.0.0a1',
    packages=find_packages(),
    url='http://www.pulpproject.org',
    license='GPLv2+',
    author='Pulp Team',
    author_email='pulp-list@redhat.com',
    description='common code for pulp\'s ostree support',
)
| dkliban/pulp_ostree | common/setup.py | Python | gpl-2.0 | 321 |
from jsg import Document, CompoundDocument
from jsg.fields import StringField
from utils import check_schema, schema
def test_compound_doc(schema):
    """A CompoundDocument with one_of renders as a bare oneOf of $refs."""
    @schema.add()
    class A(Document):
        a = StringField()
    @schema.add()
    class B(Document):
        b = StringField()
    @schema.add()
    class C(CompoundDocument):
        one_of = ["A", "B"]
    def obj_with_string(prop):
        # Expected rendering of a Document holding a single StringField.
        return {'type': 'object', 'properties': {prop: {'type': 'string'}}}
    check_schema(schema, "C", {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'definitions': {
            'A': obj_with_string('a'),
            'B': obj_with_string('b'),
            'C': {
                'oneOf': [
                    {'$ref': '#/definitions/A'},
                    {'$ref': '#/definitions/B'},
                ]
            },
        },
        '$ref': '#/definitions/C',
    })
def test_compound_doc_two_types(schema):
    """A CompoundDocument may combine anyOf and oneOf over the same refs."""
    @schema.add()
    class A(Document):
        a = StringField()
    @schema.add()
    class B(Document):
        b = StringField()
    @schema.add()
    class C(CompoundDocument):
        one_of = ["A", "B"]
        any_of = ["A", "B"]
    def obj_with_string(prop):
        # Expected rendering of a Document holding a single StringField.
        return {'type': 'object', 'properties': {prop: {'type': 'string'}}}
    def refs():
        # Fresh list each call so the two combinators don't share state.
        return [{'$ref': '#/definitions/A'}, {'$ref': '#/definitions/B'}]
    check_schema(schema, "C", {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'definitions': {
            'A': obj_with_string('a'),
            'B': obj_with_string('b'),
            'C': {
                'anyOf': refs(),
                'oneOf': refs(),
            },
        },
        '$ref': '#/definitions/C',
    })
| pbutler/jsg | tests/test_compound_doc.py | Python | mit | 2,614 |
import numpy as np
from ctypes import c_int, c_double, c_double, c_bool, c_float, c_char_p, c_void_p
import ctypes
import os
LIB_PATH = os.path.dirname( os.path.realpath(__file__) )
def recompile(path):
    """Run `make clean` followed by `make` inside *path*, then restore the
    previous working directory (which is printed at the end)."""
    print( path )
    previous_dir = os.getcwd()
    os.chdir(path)
    os.system("make clean")
    os.system("make")
    os.chdir(previous_dir)
    print( os.getcwd() )
def init():
    """(Re)build and load the core libFlight shared library via ctypes.
    NOTE(review): LIB_PATH+'../../../' concatenates without a path separator,
    so normpath collapses the 'pyFlight..' component upward; the result lands
    on <repo>/cpp -- confirm this is the intended target.
    """
    LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build/libs/libFlight')
    recompile(LIB_PATH_CPP)
    return ctypes.CDLL( LIB_PATH_CPP+"/libFlight.so" )
def initViewLib():
    """(Re)build and load the SDL-based FlightView shared library.
    NOTE(review): same separator-less LIB_PATH concatenation as in init();
    verify the resolved directory.
    """
    LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build/libs_SDL/FlightView')
    recompile(LIB_PATH_CPP)
    return ctypes.CDLL( LIB_PATH_CPP+"/libFlightView.so" )
# =========== main
lib = init()    # handle to the core simulation library
libView = None  # view library is created lazily by FlightView
# numpy -> ctypes array type shorthands (contiguous memory required)
array1ui = np.ctypeslib.ndpointer(dtype=np.uint32, ndim=1, flags='CONTIGUOUS')
array1i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array2i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array1d = np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array2d = np.ctypeslib.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
array3d = np.ctypeslib.ndpointer(dtype=np.double, ndim=3, flags='CONTIGUOUS')
#char_ptr = ctypes.POINTER(c_char )
double_ptr = ctypes.POINTER(c_double)
# ========= C functions
#void loadFromFile( char* fname )
lib.loadFromFile.argtypes = [c_char_p]
lib.loadFromFile.restype = None
def loadFromFile( fname ):
    """Load the simulation definition from file *fname*."""
    lib.loadFromFile( fname )
#void setPose( double* pos, double* vel, double* rot )
lib.setPose.argtypes = [array1d,array1d,array2d]
lib.setPose.restype = None
def setPose( pos, vel, rot ):
    """Set craft position (3,), velocity (3,) and rotation matrix (3,3)."""
    lib.setPose( pos, vel, rot )
#void setTilt( int iwing, double tilt ){
lib.setTilt.argtypes = [c_int,c_double]
lib.setTilt.restype = None
def setTilt( iwing, tilt ):
    """Set the tilt angle of wing number *iwing*."""
    lib.setTilt( iwing, tilt )
#void fly( int n, int nsub, double dt, double* pos_, double* vel_, double* rot_ )
lib.fly.argtypes = [c_int, c_int, c_double, array2d,array2d,array3d]
lib.fly.restype = None
def fly( pos, vel, rot, nsub=10, dt=0.01 ):
    """Simulate len(pos) recorded steps (nsub substeps of dt each),
    writing the trajectory into the pos/vel/rot output arrays."""
    n = len(pos)
    lib.fly( n, nsub, dt, pos, vel, rot )
#void flyAndShootTargets( int nsub, double dt, double* controlBuff, double* stateBuff, int* targetShot ){
lib.flyAndShootTargets.argtypes = [c_int, c_double, array1d, array1d, array2d]
lib.flyAndShootTargets.restype = None
def flyAndShootTargets( controlBuff, stateBuff, targetHits, nsub=10, dt=0.01 ):
    """Run the fly-and-shoot scenario; results land in the passed buffers."""
    lib.flyAndShootTargets( nsub, dt, controlBuff, stateBuff, targetHits )
#void setTargets( int nsub, double dt, double* controlBuff, double* stateBuff, int* targetShot ){
lib.setTargets.argtypes = [c_int, array2d ]
lib.setTargets.restype = None
def setTargets( targets ):
    """Register the (n,·) array of shooting targets with the simulation."""
    lib.setTargets( len(targets), targets )
#void setTargets( int nsub, double dt, double* controlBuff, double* stateBuff, int* targetShot ){
lib.getWorldPointer.argtypes = []
lib.getWorldPointer.restype = c_void_p
def getWorldPointer():
    """Return the raw C pointer to the simulation world (for the view lib)."""
    return lib.getWorldPointer()
# ===== view
class FlightView():
def __init__(self, work_dir, wh=(800,600) ):
#void init( int w, int h, void* craft1_, void* bursts_){
self.libSDL = ctypes.CDLL( "/usr/lib/x86_64-linux-gnu/libSDL2.so", ctypes.RTLD_GLOBAL )
#self.libGL = ctypes.CDLL( "/usr/lib/x86_64-linux-gnu/libGL.so", ctypes.RTLD_GLOBAL )
self.libGL = ctypes.CDLL( "/usr/lib32/nvidia-384/libGL.so", ctypes.RTLD_GLOBAL )
#self.libSDL = ctypes.CDLL( "libSDL2.so", ctypes.RTLD_GLOBAL )
#self.libGL = ctypes.CDLL( "libGL.so", ctypes.RTLD_GLOBAL )
self.lib = initViewLib()
print "========= libView ", libView
self.lib.init.argtypes = [ c_int, c_int, c_void_p, c_char_p ]
self.lib.init.restype = None
self.lib.init( wh[0], wh[1], lib.getWorldPointer(), work_dir )
#void fly( int n, int nsub, double dt, double* pos_, double* vel_, double* rot_ )
self.lib.draw.argtypes = []
self.lib.draw.restype = None
def draw(self):
self.lib.draw()
| ProkopHapala/SimpleSimulationEngine | python/pyFlight/c_interface.py | Python | mit | 4,121 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyParameters(Model):
    """Parameters that define the representation of topology.
    :param target_resource_group_name: The name of the target resource group
     to perform topology on.
    :type target_resource_group_name: str
    :param target_virtual_network: The reference of the Virtual Network
     resource.
    :type target_virtual_network:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param target_subnet: The reference of the Subnet resource.
    :type target_subnet: ~azure.mgmt.network.v2017_11_01.models.SubResource
    """
    # Maps Python attribute names to their wire (JSON) keys and types for
    # msrest (de)serialization.
    _attribute_map = {
        'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'},
        'target_virtual_network': {'key': 'targetVirtualNetwork', 'type': 'SubResource'},
        'target_subnet': {'key': 'targetSubnet', 'type': 'SubResource'},
    }
    def __init__(self, target_resource_group_name=None, target_virtual_network=None, target_subnet=None):
        super(TopologyParameters, self).__init__()
        self.target_resource_group_name = target_resource_group_name
        self.target_virtual_network = target_virtual_network
        self.target_subnet = target_subnet
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/topology_parameters.py | Python | mit | 1,697 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'ohmanizer.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Django admin interface at /admin/.
    url(r'^admin/', include(admin.site.urls)),
)
| bryndivey/ohmanizer | ohmanizer/urls.py | Python | apache-2.0 | 278 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Hagay Onn
# Date: 15 April 2017
# Contact: hagayo@gmail.com
# Summary: Stack class that supports pushing/popping objects as well as getting last/max object in O(1)
from stack import Stack
__author__ = 'Hagay Onn'
__version__ = '1.0'
class MaxStack(Stack):
    """A Stack that can also report its maximum element in O(1).
    The head of the auxiliary list always holds the current maximum;
    duplicates of the maximum are recorded again so popping stays correct.
    """
    def __init__(self):
        super(MaxStack, self).__init__()
        self._max_list = []  # front element is the current maximum
    def push(self, item):
        """Push a new object onto the stack."""
        super(MaxStack, self).push(item)
        if not self._max_list or item >= self._max_list[0]:
            self._max_list.insert(0, item)
    def pop(self):
        """Remove and return the most recently pushed object (None if empty)."""
        if self.is_empty():
            return None
        if self.get_last() == self.get_max():
            self._max_list.pop(0)
        return super(MaxStack, self).pop()
    def get_max(self):
        """Return the current maximum without removing it (None if empty)."""
        if not self._max_list:
            return None
        return self._max_list[0]
    # Nice to have for debugging, etc.
    def __str__(self):
        """Return a human friendly representation of the stack."""
        return "{0}, Max={1}".format(super(MaxStack, self).__str__(), str(self.get_max()))
    def print(self):
        """Print the stack in a human friendly format."""
        print(str(self))
# for playing with the MaxStack functionality in the terminal
if __name__ == '__main__':
    # Interactive demo: push a fixed sequence, then pop four values,
    # printing the stack (and its running maximum) after every operation.
    demo = MaxStack()
    demo.print()
    for value in (4, 7, 5, 5, 7, 8, 1, 3, 123):
        demo.push(value)
        demo.print()
    for attempt in range(1, 5):
        print(" Popped out {}: {}".format(attempt, str(demo.pop())))
        demo.print()
| hagayo/MaxStack_o1_Python | max_stack.py | Python | mit | 2,529 |
#! /usr/bin/python
from __future__ import division
from pytronica import *
# Mix of one-second, equal-amplitude sine partials from 5 kHz up to 19 kHz
# in 1 kHz steps, plus a lone 22 kHz partial (20/21 kHz deliberately absent).
c = Chain()
for freq in list(range(5000, 20000, 1000)) + [22000]:
    c.add(Sine(freq).take(1), 1)
c *= .5
c.play()
#c.audacity()
| chriswatrous/pytronica | songs/experiments/high_freq.py | Python | gpl-2.0 | 596 |
"""
Sensor for Crime Reports.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.crimereports/
"""
from collections import defaultdict
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_INCLUDE, CONF_EXCLUDE, CONF_NAME, CONF_LATITUDE, CONF_LONGITUDE,
ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE,
LENGTH_KILOMETERS, LENGTH_METERS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from homeassistant.util.distance import convert
from homeassistant.util.dt import now
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['crimereports==1.0.0']
_LOGGER = logging.getLogger(__name__)
CONF_RADIUS = 'radius'
DOMAIN = 'crimereports'
EVENT_INCIDENT = '{}_incident'.format(DOMAIN)
SCAN_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_RADIUS): vol.Coerce(float),
vol.Inclusive(CONF_LATITUDE, 'coordinates'): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, 'coordinates'): cv.longitude,
vol.Optional(CONF_INCLUDE): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXCLUDE): vol.All(cv.ensure_list, [cv.string])
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Crime Reports platform."""
    # Coordinates fall back to the Home Assistant instance's own location.
    sensor = CrimeReportsSensor(
        hass,
        config.get(CONF_NAME),
        config.get(CONF_LATITUDE, hass.config.latitude),
        config.get(CONF_LONGITUDE, hass.config.longitude),
        config.get(CONF_RADIUS),
        config.get(CONF_INCLUDE),
        config.get(CONF_EXCLUDE),
    )
    add_devices([sensor], True)
class CrimeReportsSensor(Entity):
    """Crime Reports Sensor."""
    def __init__(self, hass, name, latitude, longitude, radius,
                 include, exclude):
        """Initialize the sensor."""
        import crimereports
        self._hass = hass
        self._name = name
        self._include = include
        self._exclude = exclude
        # The crimereports client expects the search radius in kilometers.
        radius_kilometers = convert(radius, LENGTH_METERS, LENGTH_KILOMETERS)
        self._crimereports = crimereports.CrimeReports(
            (latitude, longitude), radius_kilometers)
        self._attributes = None
        self._state = None
        # IDs of incidents already seen, so events fire only for new ones.
        self._previous_incidents = set()
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes
    def _incident_event(self, incident):
        # Publish a single incident on the Home Assistant event bus.
        data = {
            'type': incident.get('type'),
            'description': incident.get('friendly_description'),
            'timestamp': incident.get('timestamp'),
            'location': incident.get('location')
        }
        if incident.get('coordinates'):
            data.update({
                ATTR_LATITUDE: incident.get('coordinates')[0],
                ATTR_LONGITUDE: incident.get('coordinates')[1]
            })
        self._hass.bus.fire(EVENT_INCIDENT, data)
    def update(self):
        """Update device state."""
        import crimereports
        incident_counts = defaultdict(int)
        incidents = self._crimereports.get_incidents(
            now().date(), include=self._include, exclude=self._exclude)
        # Suppress events on the very first poll so startup does not replay
        # the whole day's incidents.
        fire_events = len(self._previous_incidents) > 0
        # Fewer incidents than remembered means the provider's day rolled
        # over; start tracking afresh.
        if len(incidents) < len(self._previous_incidents):
            self._previous_incidents = set()
        for incident in incidents:
            incident_type = slugify(incident.get('type'))
            incident_counts[incident_type] += 1
            if (fire_events and incident.get('id')
                    not in self._previous_incidents):
                self._incident_event(incident)
            self._previous_incidents.add(incident.get('id'))
        self._attributes = {
            ATTR_ATTRIBUTION: crimereports.ATTRIBUTION
        }
        # State is the total incident count; per-type counts as attributes.
        self._attributes.update(incident_counts)
        self._state = len(incidents)
| MungoRae/home-assistant | homeassistant/components/sensor/crimereports.py | Python | apache-2.0 | 4,382 |
from .headers import (
headers, FakeChromeUA)
from .conf import (
get_db_args, get_redis_args, get_timeout, get_crawl_interal, get_excp_interal,
get_max_repost_page, get_max_search_page, get_max_home_page, get_max_comment_page,
get_max_retries, get_broker_and_backend, get_redis_master, get_code_username, get_code_password,
get_running_mode, get_crawling_mode, get_share_host_count, get_cookie_expire_time, get_email_args,
get_images_allow, get_images_path, get_images_type, get_time_after, get_samefollow_uid
) | ResolveWang/WeiboSpider | config/__init__.py | Python | mit | 537 |
# coding: UTF-8
# Name: 表达式元素
# Author: LYC
# Created: 2014-04-03
import re
from gpcalccfg import OPRegex
class ElementType(object):
    """A category of expression element, identified by a name and
    recognised via a compiled regular expression."""
    def __init__(self, name, regex):
        super(ElementType, self).__init__()
        self.name = name
        self.regex = regex
    def __call__(self, obj_str):
        """Return the first substring of *obj_str* matched by this type's
        regex, or None when nothing matches."""
        found = self.regex.search(obj_str)
        return found.group() if found else None
    def __str__(self):
        return self.name
    # repr mirrors str: just the type name.
    __repr__ = __str__
class ElementTypeEnum(object):
    """
    Enumeration of the expression element types.
    """
    UOP = ElementType("UOP", OPRegex.UOPRegex)  # unary operator
    BOP = ElementType("BOP", OPRegex.BOPRegex)  # binary operator
    VAR = ElementType("VAR", OPRegex.VARRegex)  # legal variable name
    LBK = ElementType("LBK", OPRegex.LBKRegex)  # left bracket
    RBK = ElementType("RBK", OPRegex.RBKRegex)  # right bracket
    NUM = ElementType("NUM", OPRegex.NUMRegex)  # numeric literal
    NON = ElementType("NON", OPRegex.NONRegex)  # nothing / no element
class Element(object):
    """A single expression element: a value tagged with its ElementType."""
    def __init__(self, value, e_type):
        super(Element, self).__init__()
        self.value = value
        self.type = e_type
    def __str__(self):
        # Display as the bare value.
        return str(self.value)
    def __repr__(self):
        return self.__str__()
| MrLYC/GPCalc | GPCalc/expelement.py | Python | gpl-2.0 | 1,370 |
# -*- coding: utf-8 -*-
from abc import ABCMeta,abstractmethod
# Lifecycle states of a container.
CONTAINER_STATE_ACTIVE = 1
CONTAINER_STATE_INACTIVE = 2
CONTAINER_STATE_MIGRATING = 3
CONTAINER_STATE_PREPARE = 4
class Container(object):
    """Value object describing a network-namespaced container instance."""
    hostId = None        # identifier of the host running the container
    pid = None           # host-side process id
    id = None            # container identifier
    mac = None           # MAC address
    netnsId = None       # network-namespace identifier
    image = None         # image the container was created from
    dataDirectory = ''   # host path holding the container's data
    createTime = None    # creation timestamp
    switch = None        # attached switch -- TODO confirm semantics
    state = CONTAINER_STATE_PREPARE  # one of the CONTAINER_STATE_* values
    belongsTo = None     # owner reference -- TODO confirm semantics
    servicePort = -1     # exposed service port (-1 = unset)
    privateIp=None
    backMac = None       # secondary/back-side MAC -- TODO confirm semantics
    def toJson(self):
        """Serialize this container to JSON. Stub: not implemented yet."""
        pass
    @classmethod
    def parseFromJson(cls):
        """Build a Container from JSON. Stub: not implemented yet."""
        pass
| onlysheep5200/NetnsEx | lib/container.py | Python | apache-2.0 | 582 |
import collections
from supriya import CalculationRate, SignalRange
from supriya.ugens.UGen import UGen
class MouseY(UGen):
    """
    A mouse cursor tracker.
    MouseY tracks the y-axis of the mouse cursor position.
    ::
        >>> supriya.ugens.MouseY.kr()
        MouseY.kr()
    """
    ### CLASS VARIABLES ###
    __documentation_section__ = "User Interaction UGens"
    # Input defaults in calculation order; `lag` presumably smooths cursor
    # motion and `warp` selects the output mapping -- confirm against the
    # SuperCollider MouseY UGen documentation.
    _ordered_input_names = collections.OrderedDict(
        [("minimum", 0), ("maximum", 1), ("warp", 0), ("lag", 0.2)]
    )
    _signal_range = SignalRange.UNIPOLAR
    # Only control-rate construction is allowed.
    _valid_calculation_rates = (CalculationRate.CONTROL,)
| Pulgama/supriya | supriya/ugens/MouseY.py | Python | mit | 614 |
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.unit.utils.format_call_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test `salt.utils.format_call`
'''
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import format_call
from salt.exceptions import SaltInvocationError
class TestFormatCall(TestCase):
    """Unit tests for salt.utils.format_call's args/kwargs splitting."""
    def test_simple_args_passing(self):
        # Parameters without defaults end up in 'args'; defaulted ones in
        # 'kwargs', taken from the data dict or their declared default.
        def foo(one, two=2, three=3):
            pass
        self.assertEqual(
            format_call(foo, dict(one=10, two=20, three=30)),
            {'args': [10], 'kwargs': dict(two=20, three=30)}
        )
        self.assertEqual(
            format_call(foo, dict(one=10, two=20)),
            {'args': [10], 'kwargs': dict(two=20, three=3)}
        )
        self.assertEqual(
            format_call(foo, dict(one=2)),
            {'args': [2], 'kwargs': dict(two=2, three=3)}
        )
    def test_mimic_typeerror_exceptions(self):
        # format_call should mirror Python's own TypeError wording when
        # required arguments are missing.  (assertRaisesRegexp is the
        # Python2-era spelling, renamed assertRaisesRegex in 3.2+.)
        def foo(one, two=2, three=3):
            pass
        def foo2(one, two, three=3):
            pass
        with self.assertRaisesRegexp(
            SaltInvocationError,
            r'foo takes at least 1 argument \(0 given\)'):
            format_call(foo, dict(two=3))
        with self.assertRaisesRegexp(
            TypeError,
            r'foo2 takes at least 2 arguments \(1 given\)'):
            format_call(foo2, dict(one=1))
if __name__ == '__main__':
    # Allow running this test module directly via salt's integration runner.
    from integration import run_tests
    run_tests(TestFormatCall, needs_daemon=False)
| stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/utils/format_call_test.py | Python | apache-2.0 | 1,713 |
__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
    """ Definition of the hyperelastic problem """
    def mesh(self):
        # 8 cells per edge of the unit cube.
        n = 8
        return UnitCubeMesh(n, n, n)
    # Setting up dirichlet conditions and boundaries
    def dirichlet_values(self):
        # Left face held fixed; right face rotated by theta about the x-axis
        # through the cross-section centre (y0, z0).
        clamp = Expression(("0.0", "0.0", "0.0"))
        twist = Expression(("0.0",
                            "y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
                            "z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
                           y0=0.5, z0=0.5, theta=pi/6)
        return [clamp, twist]
    def dirichlet_boundaries(self):
        left = "x[0] == 0.0"
        right = "x[0] == 1.0"
        return [left, right]
    # List of material models
    def material_model(self):
        # Material parameters can either be numbers or spatially
        # varying fields. For example,
        mu = 3.8461
        lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
        C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
        delka = 1.0/sqrt(2.0)  # NOTE(review): appears unused
        M = Constant((0.0,1.0,0.0))  # fibre direction for anisotropic models
        k1 = 1e2; k2 = 1e1
        materials = []
        materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
        materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
        materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
        materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
        materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
        materials.append(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
        materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
        materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
                                'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
        # Material index is taken from the command line; defaults to
        # neo-Hookean (index 2) when absent or unparsable.
        try:
            index = int(argv[1])
        except:
            index = 2
        print str(materials[index])
        return materials[index]
    def name_method(self, method):
        self.method = method
    def __str__(self):
        return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem (prints the problem description first)
print twist
twist.solve()
| hnarayanan/twist | demo/static/twist.py | Python | gpl-3.0 | 2,606 |
import sys, os, socket, atexit, re
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
from setuptools.compat import urllib2
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
# Candidate locations of the system CA certificate bundle on various
# Unix-like platforms; the first existing one wins (see find_ca_bundle).
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
""".strip().split()
# Sentinels meaning "not importable"; replaced by the real classes below if
# either the Python 2 or Python 3 module location provides them.
HTTPSHandler = HTTPSConnection = object
for what, where in (
    ('HTTPSHandler', ['urllib2','urllib.request']),
    ('HTTPSConnection', ['httplib', 'http.client']),
):
    for module in where:
        try:
            exec("from %s import %s" % (module, what))
        except ImportError:
            pass
# HTTPS verification is usable only when the ssl module and both HTTPS
# classes were successfully imported.
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
    from socket import create_connection
except ImportError:
    # Fallback (backported from CPython) for very old Pythons whose socket
    # module lacks create_connection.
    _GLOBAL_DEFAULT_TIMEOUT = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', object())

    def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                          source_address=None):
        """Connect to *address* and return the socket object.
        Convenience function. Connect to *address* (a 2-tuple ``(host,
        port)``) and return the socket object. Passing the optional
        *timeout* parameter will set the timeout on the socket instance
        before attempting to connect. If no *timeout* is supplied, the
        global default timeout setting returned by :func:`getdefaulttimeout`
        is used. If *source_address* is set it must be a tuple of (host, port)
        for the socket to bind as a source address before making the connection.
        An host of '' or port 0 tells the OS to use the default.
        """
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                    sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            # BUGFIX: the bare name `error` is undefined in this module (only
            # `import socket` is done), so a failed attempt raised NameError
            # instead of trying the next resolved address.
            except socket.error as exc:
                err = exc
                if sock is not None:
                    sock.close()
        if err is not None:
            # BUGFIX: re-raise the captured exception explicitly; a bare
            # `raise` here is outside any handler and invalid on Python 3.
            raise err
        else:
            raise socket.error("getaddrinfo returns an empty list")
try:
    from ssl import CertificateError, match_hostname
except ImportError:
    # Local fallback (backported from CPython's ssl module) for Pythons whose
    # ssl lacks these names.  NOTE: ssl.match_hostname was removed in 3.12,
    # so modern interpreters take this branch as well.
    class CertificateError(ValueError):
        pass
    def _dnsname_to_pat(dn):
        # Translate one dNSName value (possibly containing wildcards) into a
        # compiled, case-insensitive, fully-anchored regular expression.
        pats = []
        for frag in dn.split(r'.'):
            if frag == '*':
                # When '*' is a fragment by itself, it matches a non-empty dotless
                # fragment.
                pats.append('[^.]+')
            else:
                # Otherwise, '*' matches any dotless fragment.
                frag = re.escape(frag)
                pats.append(frag.replace(r'\*', '[^.]*'))
        return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
        are mostly followed, but IP addresses are not accepted for *hostname*.
        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_to_pat(value).match(hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_to_pat(value).match(hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                "doesn't match either of %s"
                % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                "doesn't match %r"
                % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                "subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
    """Simple verifying handler: no auth, subclasses, timeouts, etc."""
    def __init__(self, ca_bundle):
        # Path to the CA bundle used to verify server certificates.
        self.ca_bundle = ca_bundle
        HTTPSHandler.__init__(self)
    def https_open(self, req):
        # Build every connection with our CA bundle baked in.
        return self.do_open(
            lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
        )
class VerifyingHTTPSConn(HTTPSConnection):
    """Simple verifying connection: no auth, subclasses, timeouts, etc."""
    def __init__(self, host, ca_bundle, **kw):
        HTTPSConnection.__init__(self, host, **kw)
        # CA bundle path used to verify the server certificate.
        self.ca_bundle = ca_bundle
    def connect(self):
        """Open the TCP connection, wrap it in TLS and verify the peer."""
        # BUGFIX: `source_address` used to be passed positionally, which made
        # create_connection() treat it as the *timeout* argument.
        sock = create_connection(
            (self.host, self.port),
            source_address=getattr(self, 'source_address', None)
        )
        # NOTE(review): ssl.wrap_socket is deprecated since 3.7 and removed
        # in 3.12; an SSLContext-based wrap would be needed there -- confirm
        # the supported interpreter range.
        self.sock = ssl.wrap_socket(
            sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
        )
        try:
            match_hostname(self.sock.getpeercert(), self.host)
        except CertificateError:
            # Tear the connection down before propagating the mismatch.
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            raise
def opener_for(ca_bundle=None):
    """Get a urlopen() replacement that uses ca_bundle for verification"""
    # Falls back to the discovered system CA bundle when none is given.
    return urllib2.build_opener(
        VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
    ).open
# Cached CertFile instance; built at most once per process.
_wincerts = None
def get_win_certfile():
    """Return the path of a temp file holding the Windows system CA certs,
    or None when the optional wincertstore package is unavailable."""
    global _wincerts
    if _wincerts is not None:
        return _wincerts.name
    try:
        from wincertstore import CertFile
    except ImportError:
        # No wincertstore: no usable CA bundle on Windows.
        return None
    class MyCertFile(CertFile):
        def __init__(self, stores=(), certs=()):
            CertFile.__init__(self)
            for store in stores:
                self.addstore(store)
            self.addcerts(certs)
            # Delete the temp file when the interpreter exits.
            atexit.register(self.close)
    _wincerts = MyCertFile(stores=['CA', 'ROOT'])
    return _wincerts.name
def find_ca_bundle():
    """Return an existing CA bundle path, or None"""
    # Windows builds its bundle from the system certificate stores.
    if os.name == 'nt':
        return get_win_certfile()
    # Elsewhere, take the first well-known bundle location that exists.
    for candidate in cert_paths:
        if os.path.isfile(candidate):
            return candidate
    # Last resort: the bundle shipped with the optional certifi package.
    try:
        return pkg_resources.resource_filename('certifi', 'cacert.pem')
    except (ImportError, ResolutionError, ExtractionError):
        return None
| c0710204/mirrorsBistu | pypi/bandersnatch/lib/python2.7/site-packages/setuptools/ssl_support.py | Python | mit | 7,288 |
#
# Copyright 2014 Mingyuan Xia (http://mxia.me) and others
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# Mingyuan Xia
# Xinye Lin
# Ran Shu
#
import re
class CellularAgent:
    """Controls the cellular data service of an attached Android device."""
    def __init__(self, device):
        # `device` must expose a shell(command) method (e.g. an EMonkeyDevice).
        self.device = device
    def turnOnCellularData(self):
        """ Turn on cellular data service (need root access)
        Return True on success, False otherwise.
        """
        try:
            self.device.shell('su')
            self.device.shell('svc data enable')
            return True
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            print("Failed to turn on the cellular data.")
            return False
    def turnOffCellularData(self):
        """ Turn off cellular data service (need root access)
        Return True on success, False otherwise.
        """
        try:
            self.device.shell('su')
            self.device.shell('svc data disable')
            return True
        except Exception:
            print("Failed to turn off the cellular data.")
            return False
    def getCellularDataStatus(self):
        """ Report the current data service status
        Return True when the data service is on
        """
        # Status string '2' means "connected" per SystemStatusAgent's probe.
        sysAgent = SystemStatusAgent(self.device)
        return sysAgent.getCellularDataStatus() == '2'
    def toggleCellularDataStatus(self):
        """Flip the cellular data service: on -> off, off -> on."""
        if self.getCellularDataStatus():
            self.turnOffCellularData()
        else:
            self.turnOnCellularData()
class LogcatAgent:
    """LogcatAgent controls logcat, the logging facility of Android."""

    # Names of the standard log buffers accepted by ``logcat -b``.
    MAIN = 'main'
    EVENTS = 'events'
    RADIO = 'radio'

    def __init__(self, device):
        """ Initialize the agent with a given device
        @param device: should be an EMonkeyDevice
        """
        self.device = device

    def logcat(self, args):
        """ Send a raw logcat command and return its output (utf-8 encoded)
        @param args: list of argument strings appended to ``logcat``
        """
        s = ' '.join(['logcat'] + list(args))
        return self.device.shell(s).encode('utf-8')

    def clear(self):
        """ Clear the logcat logs"""
        self.logcat(['-c'])

    def dumpBuf(self, buf=MAIN):
        """ Dump a specific log buffer (main, events or radio)"""
        # was ['-b ', buf], which produced a doubled space in the command
        return self.logcat(['-b', buf])

    def dump(self, fmt=None, filterTuples=None):
        """ Dump the logcat logs with given filters and formats
        @param fmt: the output format
        @param filterTuples: a list of (TAG,LEVEL) tuples that specify filtering
            according to Android doc, LEVEL could be:
            V - Verbose (lowest priority)
            D - Debug
            I - Info
            W - Warning
            E - Error
            F - Fatal
            S - Silent (highest priority, on which nothing is ever printed)
        """
        if filterTuples is None:
            # avoid the mutable-default-argument pitfall (was ``=[]``)
            filterTuples = []
        cmd = ['-d']
        if fmt is not None:
            cmd.append('-v')
            cmd.append(fmt)
        for tp in filterTuples:
            cmd.append('%s:%s' % tp)
        return self.logcat(cmd)
class ScreenAgent:
    """Controls screen orientation, rotation lock and screen power through
    the on-device uiautomator test jar (driven via SystemStatusAgent)."""

    def __init__(self, device):
        self.device = device
        # Names of the test methods inside AndroidTest.jar.
        self.METHOD_CHANGE_ORIENTATION = r'testChangeOrientation'
        self.METHOD_CHANGE_RIGHT_DOWN = r'testChangeRightDown'
        self.METHOD_CHANGE_LEFT_DOWN = r'testChangeLeftDown'
        self.METHOD_FREEZE_ROTATION = r'testFreezeRotation'
        self.METHOD_UNFREEZE_ROTATION = r'testUnfreezeRotation'
        self.METHOD_TOGGLE_SCREEN = r'testToggleScreen'

    def getScreenRotationStatus(self):
        """Return '1' if rotation is locked, '0' if auto-rotating
        (or False when the status cannot be read)."""
        sysAgent = SystemStatusAgent(self.device)
        return sysAgent.getScreenRotationStatus()

    def getOrientation(self):
        """Return the current orientation code ('0'..'3')."""
        sysAgent = SystemStatusAgent(self.device)
        return sysAgent.getOrientation()

    def _runAndCheckOrientationChanged(self, methodName):
        # Shared helper: run a jar test method and report whether the
        # orientation actually changed as a result.
        sysAgent = SystemStatusAgent(self.device)
        before = sysAgent.getOrientation()
        sysAgent.testAndroidJarMethod(methodName)
        return sysAgent.getOrientation() != before

    def changeOrientation(self):
        """Rotate the screen; True when the orientation changed."""
        return self._runAndCheckOrientationChanged(self.METHOD_CHANGE_ORIENTATION)

    def changeRightDown(self):
        """Rotate right side down; True when the orientation changed."""
        return self._runAndCheckOrientationChanged(self.METHOD_CHANGE_RIGHT_DOWN)

    def changeLeftDown(self):
        """Rotate left side down; True when the orientation changed."""
        return self._runAndCheckOrientationChanged(self.METHOD_CHANGE_LEFT_DOWN)

    def freezeRotation(self):
        """Lock auto-rotation; True when the lock took effect."""
        sysAgent = SystemStatusAgent(self.device)
        sysAgent.testAndroidJarMethod(self.METHOD_FREEZE_ROTATION)
        # BUG FIX: getScreenRotationStatus() returns the string '0'/'1',
        # so the old comparison ``== 1`` was always False.
        return sysAgent.getScreenRotationStatus() == '1'

    def unfreezeRotation(self):
        """Unlock auto-rotation; True when the unlock took effect."""
        sysAgent = SystemStatusAgent(self.device)
        sysAgent.testAndroidJarMethod(self.METHOD_UNFREEZE_ROTATION)
        return sysAgent.getScreenRotationStatus() == '0'

    def toggleScreen(self):
        """Toggle the screen on/off; True when the state actually flipped."""
        sysAgent = SystemStatusAgent(self.device)
        before = sysAgent.getScreenOnOffStatus()
        sysAgent.testAndroidJarMethod(self.METHOD_TOGGLE_SCREEN)
        # BUG FIX: the old ``== current`` returned True only when the
        # toggle had NO effect, inverting the success report.
        return sysAgent.getScreenOnOffStatus() != before
class SnapshotAgent:
    """Takes, saves, loads and compares screen snapshots of the device."""

    def __init__(self, device):
        self.device = device

    def takeSnapshot(self):
        """Grab the current screen and return a snapshot object."""
        return self.device.takeSnapshot()

    def saveSnapshot(self, snapshot, fileName):
        """Write a snapshot object out as a PNG file."""
        snapshot.writeToFile(fileName, 'png')

    def compareSnapshots(self, snapshot1, snapshot2):
        """Tell whether two snapshot objects show the same image."""
        return snapshot1.sameAs(snapshot2, 1)

    def takeAndCompareSnapshots(self, snapshotCheck):
        """Grab the screen now and compare it against ``snapshotCheck``."""
        current = self.takeSnapshot()
        return self.compareSnapshots(current, snapshotCheck)

    def getSubSnapshot(self, snapshot, coordinates):
        """Crop ``snapshot`` to the given coordinate region."""
        return snapshot.getSubImage(coordinates)

    def loadSnapshot(self, fileName):
        """Read a snapshot object back from a PNG file."""
        return self.device.loadImageFromFile(fileName)
class KeypressAgent:
    """Injects key presses and touch gestures through the on-device
    uiautomator test jar."""

    def __init__(self, device):
        self.device = device
        # Names of the test methods inside AndroidTest.jar.
        self.METHOD_PRESS_BACK = r'testPressBack'
        self.METHOD_PRESS_HOME = r'testPressHome'
        self.METHOD_CLICK_XY = r'testClick'
        self.METHOD_DRAG = r'testDrag'

    def pressBack(self):
        """Press the Back key."""
        SystemStatusAgent(self.device).testAndroidJarMethod(self.METHOD_PRESS_BACK)

    def pressHome(self):
        """Press the Home key."""
        SystemStatusAgent(self.device).testAndroidJarMethod(self.METHOD_PRESS_HOME)

    def clickXY(self, x, y):
        """Tap the screen at pixel (x, y)."""
        command = '%s -e x %s -e y %s' % (self.METHOD_CLICK_XY, x, y)
        SystemStatusAgent(self.device).testAndroidJarMethod(command)

    def drag(self, startX, startY, endX, endY, steps):
        """Drag from (startX, startY) to (endX, endY) over ``steps`` steps."""
        command = ('%s -e startX %s -e startY %s -e endX %s -e endY %s'
                   ' -e steps %s' % (self.METHOD_DRAG, startX, startY,
                                     endX, endY, steps))
        SystemStatusAgent(self.device).testAndroidJarMethod(command)
class WifiAgent:
    """Switches the device's Wifi service on/off via adb shell (needs root)."""

    def __init__(self, device):
        self.device = device

    def turnOnWifi(self):
        """ Need root access
        @return: True on success, False on failure
        """
        try:
            self.device.shell('su')
            self.device.shell('svc wifi enable')
            return True
        except Exception:
            # was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit
            print("Failed to turn on the Wifi.")
            return False

    def turnOffWifi(self):
        """ Need root access
        @return: True on success, False on failure
        """
        try:
            self.device.shell('su')
            self.device.shell('svc wifi disable')
            return True
        except Exception:
            print("Failed to turn off the Wifi.")
            return False

    def getWiFiStatus(self):
        """Return the Wifi status string parsed from ``dumpsys wifi``."""
        sysAgent = SystemStatusAgent(self.device)
        return sysAgent.getWifiStatus()

    def changeWifiStatus(self):
        """Toggle Wifi when it is fully enabled or disabled; transitional
        states (connected/disconnected) are left alone."""
        status = self.getWiFiStatus()
        if status == 'enabled':
            return self.turnOffWifi()
        elif status == 'disabled':
            return self.turnOnWifi()
        else:
            print("Wifi status unchangable for now.")
            return False
class SystemStatusAgent:
def __init__(self, device):
self.device = device
self.SCRIPT_PATH = r'/sdcard/'
self.TEST_JAR = r'AndroidTest.jar'
self.TEST_PACKAGE = r'edu.mcgill.lynxiayel.androidtest'
self.TEST_CLASS = r'AndroidTest'
self.UIAUTOMATOR_TEST_PREFIX = 'uiautomator runtest ' + self.SCRIPT_PATH + \
self.TEST_JAR + ' -c ' + self.TEST_PACKAGE + \
'.' + self.TEST_CLASS + '#'
def getWifiStatus(self):
"""Possible status:
disabled | connected | enabled | disconnected
"""
msg = self.device.shell("dumpsys wifi").encode('utf-8')
pat = re.compile(r'^Wi-Fi is (\w*)')
try:
status = pat.findall(msg)[0]
if status != "":
return status
else:
raise Exception()
except Exception:
print("Fail to acquire WiFi status!")
return False
def getCellularDataStatus(self):
"""Possible status:
0 - DATA_DISCONNECTED (Disconnected. IP traffic not available. )
1 - DATA_CONNECTING(Currently setting up a data connection.)
2 - DATA_CONNECTED (Connected. IP traffic should be available.)
3 - DATA_SUSPENDED (Suspended. The connection is up, but IP traffic is temporarily unavailable.
For example, in a 2G network, data activity may be suspended when a voice call arrives.)
"""
msg = self.device.shell('dumpsys telephony.registry').encode('utf-8')
pat = re.compile(r'mDataConnectionState=([0-3])')
try:
status = pat.findall(msg)[0]
if status in ['0', '1', '2', '3']:
return status
else:
raise Exception()
except Exception:
print("Fail to acquire Cellular data connection status!")
return False
def getScreenRotationStatus(self):
"""Possible status
1 - Rotation locked
0 - Auto Rotation
"""
msg = self.device.shell('dumpsys window').encode('utf-8')
pat = re.compile(r'mUserRotationMode=([01])')
try:
status = pat.findall(msg)[0]
if status in ['0', '1']:
return status
else:
raise Exception()
except Exception:
print("Fail to acquire screen rotation status!")
return False
def getScreenOnOffStatus(self):
"""Possible status
True - On
False - Off
"""
msg = self.device.shell('dumpsys power').encode('utf-8')
pat = re.compile(r'mScreenOn=(true|false)')
try:
status = pat.findall(msg)[0]
if status in ['true', 'false']:
return status == 'true'
else:
raise Exception()
except Exception:
print("Fail to acquire screen On/Off status!")
def getOrientation(self):
"""Possible status:
0 - portrait
1 - landscape (left side down)
2 - portrait (upside down)
3 - landscape (right side down)
"""
msg = self.device.shell('dumpsys display').encode('utf-8')
pat = re.compile(r'mCurrentOrientation=([0-3])')
try:
status = pat.findall(msg)[0]
if status in ['0', '1', '2', '3']:
return status
else:
raise Exception()
except Exception:
print("Fail to acquire screen orientation!")
return False
def getBatteryLevel(self):
"""return the remaining percentage of battery
"""
msg = self.device.shell('dumpsys battery').encode('utf-8')
pat = re.compile(r'level: (\d*)')
try:
status = pat.findall(msg)[0]
if 0 <= int(status) <= 100:
return status
else:
raise Exception()
except Exception:
print("Fail to acquire the battery level!")
return False
def hasFile(self, fileName):
try:
if not fileName:
raise ValueError('File name is empty!')
msg = self.device.shell(
'ls ' + self.SCRIPT_PATH + fileName).encode('utf-8')
fileName = re.escape(fileName)
pat = re.compile(r'(' + fileName + r')')
result = pat.findall(msg)[0]
if result:
return True
else:
return False
except Exception, e:
if isinstance(e, ValueError):
print(e.message)
else:
print("Checking file existence failed!")
return False
def hasTestScript(self):
return self.hasFile(self.TEST_JAR)
def pushTestScript(self):
return self.pushFile(self.TEST_JAR)
def prepareScript(self):
if not self.hasTestScript():
self.pushTestScript()
else:
pass
def testAndroidJarMethod(self, methodName):
self.prepareScript()
self.device.shell(
self.UIAUTOMATOR_TEST_PREFIX + methodName).encode('utf-8')
# TODO need to fx pushFile and PullFile in first
def pushFile(self, fileName):
return self.device.pushFile(self.SCRIPT_PATH + fileName)
| mcgill-cpslab/MonkeyHelper | src/Agents.py | Python | apache-2.0 | 14,204 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-27 12:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.9: makes Argument.value nullable with a
    # default of None (CharField, max_length 300).

    dependencies = [
        ('smartshark', '0029_auto_20161027_1427'),
    ]

    operations = [
        migrations.AlterField(
            model_name='argument',
            name='value',
            field=models.CharField(default=None, max_length=300, null=True),
        ),
    ]
| smartshark/serverSHARK | smartshark/migrations/0030_auto_20161027_1429.py | Python | apache-2.0 | 480 |
"""Resolwe model database functions."""
from django.db.models.aggregates import Func
from .utils import json_path_components
class JsonGetPath(Func):  # pylint: disable=abstract-method
    """PostgreSQL JSON path (#>) operator.

    Renders as ``<expression>#>%s`` where the bound parameter is the list
    of path components to descend into.
    """

    function = '#>'
    # The trailing %%s escapes to a literal %s placeholder for the path.
    template = "%(expressions)s%(function)s%%s"
    arity = 1

    def __init__(self, expression, path):
        """Initialize function.

        :param expression: Expression that returns a JSON field
        :param path: Path to get inside the JSON object, which can be
            either a list of path components or a dot-separated
            string
        """
        # Normalize the path into a list of components up front.
        self.path = json_path_components(path)
        super().__init__(expression)

    def as_sql(self, compiler, connection):  # pylint: disable=arguments-differ
        """Compile SQL for this function."""
        sql, params = super().as_sql(compiler, connection)
        # Bind the path list to the trailing %s added by our template.
        params.append(self.path)
        return sql, params
class JsonbArrayElements(Func):  # pylint: disable=abstract-method
    """PostgreSQL jsonb_array_elements function.

    Expands a JSONB array into a set of JSONB values (one row per element).
    """

    function = 'jsonb_array_elements'
    template = '%(function)s(%(expressions)s)'
    arity = 1
| jberci/resolwe | resolwe/flow/models/functions.py | Python | apache-2.0 | 1,173 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import urllib
from tempest.api_schema.response.compute import flavors as common_schema
from tempest.api_schema.response.compute import flavors_access as schema_access
from tempest.api_schema.response.compute import flavors_extra_specs \
as schema_extra_specs
from tempest.api_schema.response.compute.v2 import flavors as v2schema
from tempest.common import service_client
class FlavorsClientJSON(service_client.ServiceClient):
    """JSON REST client for the Nova v2 flavors API: listing, creation,
    deletion, extra specs and per-tenant flavor access."""

    def list_flavors(self, params=None):
        """List flavors (brief view); ``params`` become URL query args."""
        url = 'flavors'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(common_schema.list_flavors, resp, body)
        return resp, body['flavors']

    def list_flavors_with_detail(self, params=None):
        """List flavors with full details."""
        url = 'flavors/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(v2schema.list_flavors_details, resp, body)
        return resp, body['flavors']

    def get_flavor_details(self, flavor_id):
        """Get details of a single flavor."""
        resp, body = self.get("flavors/%s" % str(flavor_id))
        body = json.loads(body)
        self.validate_response(v2schema.create_get_flavor_details, resp, body)
        return resp, body['flavor']

    def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
        """Creates a new flavor or instance type."""
        post_body = {
            'name': name,
            'ram': ram,
            'vcpus': vcpus,
            'disk': disk,
            'id': flavor_id,
        }
        # NOTE(review): these truthiness checks drop falsy values such as
        # is_public=False or swap=0 -- confirm callers never pass them.
        if kwargs.get('ephemeral'):
            post_body['OS-FLV-EXT-DATA:ephemeral'] = kwargs.get('ephemeral')
        if kwargs.get('swap'):
            post_body['swap'] = kwargs.get('swap')
        if kwargs.get('rxtx'):
            post_body['rxtx_factor'] = kwargs.get('rxtx')
        if kwargs.get('is_public'):
            post_body['os-flavor-access:is_public'] = kwargs.get('is_public')
        post_body = json.dumps({'flavor': post_body})
        resp, body = self.post('flavors', post_body)

        body = json.loads(body)
        self.validate_response(v2schema.create_get_flavor_details, resp, body)
        return resp, body['flavor']

    def delete_flavor(self, flavor_id):
        """Deletes the given flavor."""
        resp, body = self.delete("flavors/{0}".format(flavor_id))
        self.validate_response(v2schema.delete_flavor, resp, body)
        return resp, body

    def is_resource_deleted(self, id):
        # Did not use get_flavor_details(id) for verification as it gives
        # 200 ok even for deleted id. LP #981263
        # we can remove the loop here and use get by ID when bug gets sortedout
        resp, flavors = self.list_flavors_with_detail()
        for flavor in flavors:
            if flavor['id'] == id:
                return False
        return True

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'flavor'

    def set_flavor_extra_spec(self, flavor_id, specs):
        """Sets extra Specs to the mentioned flavor."""
        post_body = json.dumps({'extra_specs': specs})
        resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.flavor_extra_specs,
                               resp, body)
        return resp, body['extra_specs']

    def get_flavor_extra_spec(self, flavor_id):
        """Gets extra Specs details of the mentioned flavor."""
        resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.flavor_extra_specs,
                               resp, body)
        return resp, body['extra_specs']

    def get_flavor_extra_spec_with_key(self, flavor_id, key):
        """Gets extra Specs key-value of the mentioned flavor and key."""
        resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
                              key))
        body = json.loads(body)
        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
                               resp, body)
        return resp, body

    def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
        """Update specified extra Specs of the mentioned flavor and key."""
        resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                              (flavor_id, key), json.dumps(kwargs))
        body = json.loads(body)
        self.validate_response(schema_extra_specs.flavor_extra_specs_key,
                               resp, body)
        return resp, body

    def unset_flavor_extra_spec(self, flavor_id, key):
        """Unsets extra Specs from the mentioned flavor."""
        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
                                 (str(flavor_id), key))
        self.validate_response(v2schema.unset_flavor_extra_specs, resp, body)
        return resp, body

    def list_flavor_access(self, flavor_id):
        """Gets flavor access information given the flavor id."""
        resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return resp, body['flavor_access']

    def add_flavor_access(self, flavor_id, tenant_id):
        """Add flavor access for the specified tenant."""
        post_body = {
            'addTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return resp, body['flavor_access']

    def remove_flavor_access(self, flavor_id, tenant_id):
        """Remove flavor access from the specified tenant."""
        post_body = {
            'removeTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return resp, body['flavor_access']
| ebagdasa/tempest | tempest/services/compute/json/flavors_client.py | Python | apache-2.0 | 7,252 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, cPickle as pickle
class RedisWrapper(redis.Redis):
    """Redis client that will automatically prefix conf.db_name and keeps a
    per-request local cache (``frappe.local.cache``) in front of redis."""

    def make_key(self, key, user=None):
        """Return the namespaced key ``<db_name>|[user:<user>:]<key>``.

        :param user: if ``True``, use the current session user.
        """
        if user:
            if user == True:
                user = frappe.session.user
            key = "user:{0}:{1}".format(user, key)
        return (frappe.conf.db_name + "|" + key).encode('utf-8')

    def set_value(self, key, val, user=None):
        """Sets cache value."""
        key = self.make_key(key, user)
        frappe.local.cache[key] = val
        self.set(key, pickle.dumps(val))

    def get_value(self, key, generator=None, user=None):
        """Returns cache value. If not found and generator function is
        given, it will call the generator.

        :param key: Cache key.
        :param generator: Function to be called to generate a value if `None` is returned."""
        original_key = key
        key = self.make_key(key, user)
        val = frappe.local.cache.get(key)
        if val is None:
            val = self.get(key)
            if val is not None:
                val = pickle.loads(val)
            if val is None and generator:
                val = generator()
                self.set_value(original_key, val, user=user)
            else:
                # remember the redis result locally for this request
                frappe.local.cache[key] = val
        return val

    def get_all(self, key):
        """Return a dict of every key matching ``key*`` and its value."""
        ret = {}
        for k in self.get_keys(key):
            # BUG FIX: previously wrote every value into ret[key], so the
            # result only ever held a single entry.
            # NOTE(review): k comes back from redis already namespaced while
            # get_value() prefixes again -- confirm callers expect this.
            ret[k] = self.get_value(k)
        return ret

    def get_keys(self, key):
        """Return keys with wildcard `*`."""
        return self.keys(self.make_key(key + "*"))

    def delete_keys(self, key):
        """Delete keys with wildcard `*`."""
        # keys from get_keys() are already namespaced, so skip re-prefixing
        self.delete_value(self.get_keys(key), make_keys=False)

    def delete_value(self, keys, user=None, make_keys=True):
        """Delete value, list of values."""
        if not isinstance(keys, (list, tuple)):
            keys = (keys, )
        for key in keys:
            if make_keys:
                key = self.make_key(key)
            self.delete(key)
            if key in frappe.local.cache:
                del frappe.local.cache[key]
| gangadharkadam/v5_frappe | frappe/utils/redis_wrapper.py | Python | mit | 1,928 |
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Exscript.parselib import Token
from Exscript.interpreter.ExpressionNode import ExpressionNode
class Expression(Token):
    """Parses an expression and then rebalances its (initially right-leaning)
    parse tree so evaluation respects operator precedence."""

    def __init__(self, lexer, parser, parent):
        Token.__init__(self, 'Expression', lexer, parser, parent)

        # Parse the expression.
        self.root = ExpressionNode(lexer, parser, parent)

        # Reorder the tree according to the operator priorities.
        self.prioritize(self.root)
        self.mark_end()

    def prioritize(self, start, prio = 1):
        """Rotate nodes along the right spine starting at ``start`` so that
        operators of priority ``prio`` become parents of higher-priority
        subtrees; recurses through priorities 1..5."""
        #print "Prioritizing from", start.op, "with prio", prio, (start.lft, start.rgt)
        if prio == 6:
            # Highest priority level handled; nothing left to reorder.
            return
        # Walk down the right spine past nodes of priority <= prio.
        root = start
        while root is not None and root.priority() <= prio:
            root = root.rgt
        if root is None:
            self.prioritize(start, prio + 1)
            return

        # Find the next node that has the current priority.
        previous = root
        current = root.rgt
        while current is not None and current.priority() != prio:
            previous = current
            current = current.rgt
        if current is None:
            self.prioritize(start, prio + 1)
            return

        # Reparent the expressions.
        #print "Prio of", root.op, 'is higher than', current.op
        previous.rgt = current.lft
        current.lft = root

        # Change the pointer of the parent of the root node.
        # If this was the root of the entire tree we need to change that as
        # well.
        if root.parent_node is None:
            self.root = current
        elif root.parent_node.lft == root:
            root.parent_node.lft = current
        elif root.parent_node.rgt == root:
            root.parent_node.rgt = current

        root.parent_node = current

        # Go ahead prioritizing the children.
        self.prioritize(current.lft, prio + 1)
        self.prioritize(current.rgt, prio)

    def value(self, context):
        """Evaluate the expression tree in the given context."""
        return self.root.value(context)

    def dump(self, indent = 0):
        """Print the tree for debugging (Python 2 print statements)."""
        print (' ' * indent) + self.name, 'start'
        self.root.dump(indent + 1)
        print (' ' * indent) + self.name, 'end.'
| mpenning/exscript | src/Exscript/interpreter/Expression.py | Python | gpl-2.0 | 2,830 |
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the author be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# Copyright (c) 2008 Greg Hewgill http://hewgill.com
#
# This has been modified from the original software.
# Copyright (c) 2011 William Grant <me@williamgrant.id.au>
import re
__all__ = [
'CanonicalizationPolicy',
'InvalidCanonicalizationPolicyError',
]
class InvalidCanonicalizationPolicyError(Exception):
    """The c= value could not be parsed."""
    pass


def strip_trailing_whitespace(content):
    # Drop tabs/spaces sitting immediately before a CRLF (per-line rstrip).
    return re.sub(b"[\t ]+\r\n", b"\r\n", content)


def compress_whitespace(content):
    # Squeeze any run of tabs/spaces down to a single space.
    return re.sub(b"[\t ]+", b" ", content)


def strip_trailing_lines(content):
    # Replace all trailing CRLFs (possibly none) with exactly one CRLF, so
    # the result always ends with a single CRLF.
    return re.sub(b"(\r\n)*$", b"\r\n", content)


def unfold_header_value(content):
    # Remove CRLFs so a folded (multi-line) header value becomes one line.
    return re.sub(b"\r\n", b"", content)
class Simple:
    """The "simple" canonicalization algorithm: headers untouched, body
    normalized to end in exactly one CRLF."""

    name = b"simple"

    @staticmethod
    def canonicalize_headers(headers):
        # Headers pass through completely unchanged.
        return headers

    @staticmethod
    def canonicalize_body(body):
        # Collapse any run of trailing empty lines to a single CRLF.
        canonical = strip_trailing_lines(body)
        return canonical
class Relaxed:
    """The "relaxed" canonicalization algorithm: folds and squeezes
    whitespace in headers and body."""

    name = b"relaxed"

    @staticmethod
    def canonicalize_headers(headers):
        # Lowercase the field names, unfold continuation lines, squeeze
        # WSP runs to one space, then strip surrounding WSP.
        canonical = []
        for (field, value) in headers:
            unfolded = unfold_header_value(value)
            squeezed = compress_whitespace(unfolded)
            canonical.append((field.lower().rstrip(),
                              squeezed.strip() + b"\r\n"))
        return canonical

    @staticmethod
    def canonicalize_body(body):
        # Strip per-line trailing WSP, squeeze the remaining WSP runs, and
        # reduce trailing empty lines to one CRLF.
        no_trailing_wsp = strip_trailing_whitespace(body)
        squeezed = compress_whitespace(no_trailing_wsp)
        return strip_trailing_lines(squeezed)
class CanonicalizationPolicy:
    """Pairs one header and one body canonicalization algorithm."""

    def __init__(self, header_algorithm, body_algorithm):
        self.header_algorithm = header_algorithm
        self.body_algorithm = body_algorithm

    @classmethod
    def from_c_value(cls, c):
        """Construct the canonicalization policy described by a c= value.

        May raise an C{InvalidCanonicalizationPolicyError} if the given
        value is invalid

        @param c: c= value from a DKIM-Signature header field
        @return: a C{CanonicalizationPolicy}
        """
        parts = (c if c is not None else b'simple/simple').split(b'/')
        if len(parts) == 1:
            # A lone header algorithm implies the simple body algorithm.
            parts.append(b'simple')
        elif len(parts) != 2:
            raise InvalidCanonicalizationPolicyError(c)
        try:
            header_algorithm = ALGORITHMS[parts[0]]
            body_algorithm = ALGORITHMS[parts[1]]
        except KeyError as e:
            raise InvalidCanonicalizationPolicyError(e.args[0])
        return cls(header_algorithm, body_algorithm)

    def to_c_value(self):
        """Render this policy back into a c= tag value."""
        return b'/'.join(
            (self.header_algorithm.name, self.body_algorithm.name))

    def canonicalize_headers(self, headers):
        return self.header_algorithm.canonicalize_headers(headers)

    def canonicalize_body(self, body):
        return self.body_algorithm.canonicalize_body(body)
ALGORITHMS = dict((c.name, c) for c in (Simple, Relaxed))
| abpai/mailin-test | python/dkim/canonicalization.py | Python | mit | 4,252 |
#!/usr/bin/env python
"""
Install.py tool to download, compile, and setup the kim-api library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import fullpath, geturl, checkmd5sum
parser = ArgumentParser(prog='Install.py',
                        description="LAMMPS library build wrapper script")

# settings

thisdir = fullpath('.')
version = "2.2.1"

# known checksums for different KIM-API versions. used to validate the download.
checksums = { \
'2.1.2' : '6ac52e14ef52967fc7858220b208cba5', \
'2.1.3' : '6ee829a1bbba5f8b9874c88c4c4ebff8', \
'2.2.0' : 'e7f944e1593cffd7444679a660607f6c', \
'2.2.1' : 'ae1ddda2ef7017ea07934e519d023dca', \
}

# help message

HELP = """
Syntax from src dir: make lib-kim args="-b -v version -a kim-name"
or: make lib-kim args="-b -a everything"
or: make lib-kim args="-n -a kim-name"
or: make lib-kim args="-p /usr/local/open-kim -a kim-name"
Syntax from lib dir: python Install.py -b -v version  -a kim-name
or: python Install.py -b -a everything
or: python Install.py -n -a kim-name
or: python Install.py -p /usr/local/open-kim -a kim-name

Examples:

make lib-kim args="-b" # install KIM API lib with only example models
make lib-kim args="-b -a EAM_ErcolessiAdams_1994_Al__MO_324507536345_002" # Ditto plus one model
make lib-kim args="-b -a everything"   # install KIM API lib with all models
make lib-kim args="-n -a EAM_Dynamo_Ackland_2003_W__MO_141627196590_005"  # only add one model or model driver

See the list of all KIM models here:
https://openkim.org/browse/models
"""

# -b/-n/-p are mutually exclusive ways of locating or building the KIM API.
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
                    help="download and build base KIM API library with example Models.")
pgroup.add_argument("-n", "--nobuild", action="store_true",
                    help="use the previously downloaded and compiled base KIM API.")
pgroup.add_argument("-p", "--path",
                    help="specify location of existing KIM API installation.")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
                    help="set version of KIM API library to download and build (default: %s)" % version)
parser.add_argument("-a", "--add",
                    help="add single KIM model or model driver. If adding 'everything', then all available OpenKIM models are added (may take a long time)")
parser.add_argument("-vv", "--verbose", action="store_true",
                    help="be more verbose about is happening while this script runs")

args = parser.parse_args()

# print help message and exit, if neither build nor path options are given
if not args.build and not args.path and not args.nobuild:
  parser.print_help()
  sys.exit(HELP)

# derived flags from the parsed arguments
buildflag = args.build
pathflag = args.path is not None
addflag = args.add is not None
addmodelname = args.add
everythingflag = False
if addflag and addmodelname == "everything":
  everythingflag = True
  buildflag = True
verboseflag = args.verbose
version = args.version

if pathflag:
  # an existing installation is used; never build
  buildflag = False
  kimdir = args.path
  if not os.path.isdir(kimdir):
    sys.exit("KIM API path %s does not exist" % kimdir)
  kimdir = fullpath(kimdir)

url = "https://s3.openkim.org/kim-api/kim-api-%s.txz" % version

# set KIM API directory

if pathflag:
  # configure LAMMPS to use existing kim-api installation
  with open("%s/kim-prefix.txt" % thisdir, 'w') as pffile:
    pffile.write("%s" % kimdir)

  print("Created %s/kim-prefix.txt\n  using %s" % (thisdir,kimdir))
else:
  kimdir = os.path.join(os.path.abspath(thisdir), "installed-" + version)
  if args.nobuild and not os.path.isdir(kimdir):
    sys.exit("Cannot use -n/--nobuild without first building the KIM API with -b")

# download KIM tarball, unpack, build KIM
if buildflag:

  # check to see if an installed kim-api already exists and wipe it out.

  if os.path.isdir(kimdir):
    print("kim-api is already installed at %s.\nRemoving it for re-install" % kimdir)
    shutil.rmtree(kimdir)

  # configure LAMMPS to use kim-api to be installed

  with open("%s/kim-prefix.txt" % thisdir, 'w') as pffile:
    pffile.write("%s" % kimdir)

  print("Created %s/kim-prefix.txt\n  using %s" % (thisdir,kimdir))

  # download entire kim-api tarball

  print("Downloading kim-api tarball ...")
  filename = "kim-api-%s.txz" % version
  geturl(url, "%s/%s" % (thisdir, filename))

  # verify downloaded archive integrity via md5 checksum, if known.
  if version in checksums:
    if not checkmd5sum(checksums[version], filename):
      sys.exit("Checksum for KIM-API library does not match")

  print("Unpacking kim-api tarball ...")
  cmd = 'cd "%s"; rm -rf "kim-api-%s"; tar -xJvf %s' % (thisdir, version, filename)
  subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)

  # configure kim-api

  print("Configuring kim-api ...")
  cmd = 'cd "%s/kim-api-%s" && mkdir build && cd build && cmake .. -DCMAKE_INSTALL_PREFIX="%s" -DCMAKE_BUILD_TYPE=Release' % (thisdir,version,kimdir)
  txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
  if verboseflag: print(txt.decode("UTF-8"))

  # build kim-api
  print("Building kim-api ...")
  cmd = 'cd "%s/kim-api-%s/build" && make -j2' % (thisdir, version)
  txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
  if verboseflag:
    print(txt.decode("UTF-8"))

  # install kim-api

  print("Installing kim-api ...")
  cmd = 'cd "%s/kim-api-%s/build" && make -j2 install' % (thisdir, version)
  txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
  if verboseflag:
    print(txt.decode("UTF-8"))

  # remove source files

  print("Removing kim-api source and build files ...")
  cmd = 'cd "%s"; rm -rf kim-api-%s; rm -rf kim-api-%s.txz' % (thisdir, version, version)
  subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)

  # add all OpenKIM models, if desired
  if everythingflag:
    print("Adding all OpenKIM models, this will take a while ...")
    cmd = '%s/bin/kim-api-collections-management install system OpenKIM' % (kimdir)
    txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    if verboseflag:
      print(txt.decode("UTF-8"))

# add single OpenKIM model
if addflag:

  pf_path = os.path.join(thisdir, "kim-prefix.txt")
  if os.path.isfile(pf_path):
    # NOTE(review): kimdir becomes bytes here (check_output) while earlier
    # branches leave it as str -- both work with os.path/decode below.
    cmd = 'cat %s' % pf_path
    kimdir = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)

  if not os.path.isdir(kimdir):
    sys.exit("\nkim-api is not installed")

  # download single model
  cmd = '%s/bin/kim-api-collections-management install system %s' % (kimdir.decode("UTF-8"), addmodelname)
  txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
  if verboseflag:
    print(txt.decode("UTF-8"))
| jeremiahyan/lammps | lib/kim/Install.py | Python | gpl-2.0 | 7,038 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from scalar import *
class NonUniformScalarEncoder(ScalarEncoder):
  """
  This is an implementation of the scalar encoder that encodes
  the value into unequal ranges, such that each encoding occurs with
  approximately equal frequency.
  This means that value ranges that occur more frequently will have higher
  resolution than those that occur less frequently
  """
  ############################################################################
  def __init__(self, w, n, data = None, bins = None,
               weights=None, name=None, verbosity=0):
    # An n-bit output with w contiguous active bits admits n - w + 1
    # distinct encodings, i.e. n - w + 1 bins.
    self._numBins = n - w + 1
    self.weights = weights
    # The parent ScalarEncoder operates on *bin indices*; _findBin() maps a
    # raw value to its index before any encoding takes place.
    super(NonUniformScalarEncoder, self).__init__(w=w, n=n, minval= 0, maxval=self._numBins-1,
                                                  clipInput=True, name=name,
                                                  verbosity=verbosity)
    # NOTE(review): these flag names are inverted -- hasData is True when *no*
    # data was supplied (and likewise hasBins). The exactly-one-of check below
    # is nevertheless correct, since it only compares the flags for equality.
    hasData = data is None
    hasBins = bins is None
    if hasData == hasBins:
      raise ValueError("Exactly one argument must be supplied: data or bins")
    if data is not None:
      # Derive the bin boundaries empirically from the sample data.
      self.data = numpy.array(data)
      self.bins = self.ComputeBins(self._numBins, self.data, self.weights, self.verbosity)
    if bins is not None:
      # Use the explicitly supplied boundaries and trust their count.
      #if self._numBins != len(bins):
      #  raise ValueError(
      #      '''Incorrect number of bins for given resolution
      #      Num bins supplied:%d
      #      Num bins expected (according to n and w):%d''' %(len(bins), self._numBins))
      self.bins = numpy.array(bins)
      self._numBins = self.bins.shape[0]
  ############################################################################
  @classmethod
  def ComputeBins(cls, nBins, data, weights=None, verbosity = 0):
    """Partition the (optionally weighted) data into nBins contiguous
    [start, end) ranges of approximately equal total weight."""
    data = numpy.array(data)
    bins = numpy.zeros((nBins, 2))
    #If no weights were specified, default to uniformly weighted
    if weights is None:
      weights = numpy.ones(data.shape, dtype = defaultDtype)
    sortedIndices = numpy.argsort(data)
    sortedValues = data[sortedIndices]
    sortedWeights = weights[sortedIndices]
    cumWeights = numpy.cumsum(sortedWeights)
    avgBinWeight = cumWeights[-1] / nBins
    #Prepend 0s to the values and weights because we
    #are actually dealing with intervals, not values
    sortedValues = numpy.append(sortedValues[0], sortedValues)
    cumWeights = numpy.append(0, cumWeights)
    #-------------------------------------------------------------------------
    # Iterate through each bin and find the appropriate start
    # and end value for each one. We use the numpy.interp
    # function to deal with non-integer indices
    startValue = sortedValues[0]
    cumBinWeight = 0
    binIndex = 0
    if verbosity > 0:
      print "Average Bin Weight: %.3f"% avgBinWeight
    while True:
      # Use the inverse cumulative probability mass function
      # to compute the bin endpoint
      bins[binIndex, 0] = startValue
      cumBinWeight += avgBinWeight
      endValue = numpy.interp(cumBinWeight, xp=cumWeights, fp=sortedValues)
      bins[binIndex,1] = endValue
      if verbosity > 1:
        print "Start Value:%.2f EndValue:%.2f" %(startValue, endValue)
      # Stop once the cumulative weight covers the whole data set
      # (within floating-point tolerance).
      if abs(cumWeights[-1] - cumBinWeight) < 1e-10:
        break
      startValue = endValue
      binIndex += 1
    # --------------------------------------------
    # Cleanup: if there are any identical bins, only leave one copy
    matches = (bins[0:-1, :] == bins[1:, :])
    if numpy.any(matches):
      # Assume the last bin is unique
      matches = numpy.vstack([matches, [False, False]])
      # A bin is considered a duplicate when its start matches its successor's.
      #matchingBins = numpy.all(matches, axis=1)
      matchingBins = matches[:,0]
      bins=bins[numpy.logical_not(matchingBins), :]
    #All done, print out if necessary
    if verbosity > 0:
      print "Bins:\n", bins
    return bins
  ############################################################################
  def getBucketIndices(self, input):
    """[ScalarEncoder class method override]"""
    # Translate the raw value to its bin index, then delegate to the parent.
    if input != SENTINEL_VALUE_FOR_MISSING_DATA:
      bin = self._findBin(input)
    else:
      bin = SENTINEL_VALUE_FOR_MISSING_DATA
    return super(NonUniformScalarEncoder, self).getBucketIndices(bin)
  ############################################################################
  def encodeIntoArray(self, input, output):
    """[ScalarEncoder class method override]"""
    # Same index translation as getBucketIndices, then delegate the encoding.
    if input != SENTINEL_VALUE_FOR_MISSING_DATA:
      bin = self._findBin(input)
    else:
      bin = SENTINEL_VALUE_FOR_MISSING_DATA
    super(NonUniformScalarEncoder, self).encodeIntoArray(bin, output)
  ############################################################################
  def _findBin(self, value):
    """Return the index of the bin containing `value`, -1 if it lies below
    the first bin, or self._numBins if it lies above the last bin."""
    assert self.bins is not None
    lower = value >= self.bins[:,0]
    upper = value < self.bins[:,1]
    # The last range is both left and right inclusive
    upper[-1] = (value <= self.bins[-1, -1])
    bins = numpy.where(numpy.logical_and(lower,upper))[0]
    if len(bins) == 0:
      # Out-of-range indices are handled by the parent, which was constructed
      # with clipInput=True.
      if value < self.bins[0,0]:
        return -1
      elif value >= self.bins[-1,-1]:
        return self._numBins
      else:
        raise ValueError("Improper value for encoder: %f\nBins:%r" % (value, self.bins))
    else:
      assert len(bins)==1
      return bins[0]
  ############################################################################
  def decode(self, encoded, parentFieldName=""):
    """ Overridden from scalar.py"""
    (rangeDict, fieldNames) = super(NonUniformScalarEncoder, self).decode(encoded, parentFieldName)
    # Translate the decoded bin-index range back into a value range.
    range = self._getRangeForEncoding(encoded, rangeDict, fieldNames)
    desc = self._generateRangeDescription([range])
    # NOTE(review): the `desc` computed above is immediately shadowed by the
    # tuple unpacking in the loop below, so the regenerated description is
    # never used -- confirm whether the original description is intended.
    for fieldName, (bins, desc) in rangeDict.iteritems():
      rangeDict[fieldName] = ([range], desc)
    return (rangeDict, fieldNames)
  ############################################################################
  def _getRangeForEncoding(self, encoded, rangeDict, fieldNames):
    """Return [start, end] of the bin represented by `encoded` (mean bin
    index when the decoding spans several bins)."""
    assert len(rangeDict)==1
    (bins, description) = rangeDict.values()[0]
    assert len(bins)==1
    bin = bins[0]
    # if the decoding leads to a range of bin, just take the mean for now
    if bin[0] == bin[1]:
      binIndex = bin[0]
    else:
      binIndex = numpy.round(numpy.mean(bins))
    assert binIndex >= 0 and binIndex < self.bins.shape[0]
    curRange = self.bins[binIndex,:]
    ranges = list(curRange)
    return ranges
  ############################################################################
  def _getTopDownMapping(self):
    """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.
    """
    # Built lazily and cached; row i holds w active bits starting at offset i.
    if self._topDownMappingM is None:
      self._topDownMappingM = SM32(self._numBins, self.n)
      outputSpace = numpy.zeros(self.n, dtype = GetNTAReal())
      for i in xrange(self._numBins):
        outputSpace[:] = 0.0
        outputSpace[i:i+self.w] = 1.0
        self._topDownMappingM.setRowFromDense(i, outputSpace)
    return self._topDownMappingM
  ############################################################################
  def getBucketValues(self):
    """ See the function description in base.py """
    # Lazily compute one representative value per bucket.
    if self._bucketValues is None:
      topDownMappingM = self._getTopDownMapping()
      numBuckets = topDownMappingM.nRows()
      self._bucketValues = []
      for bucketIdx in range(numBuckets):
        self._bucketValues.append(self.getBucketInfo([bucketIdx])[0].value)
    return self._bucketValues
  ############################################################################
  def getBucketInfo(self, buckets):
    """ See the function description in base.py """
    topDownMappingM = self._getTopDownMapping()
    # A bucket is summarized by the midpoint of its value range.
    binIndex = buckets[0]
    value = numpy.mean(self.bins[binIndex, :])
    return [EncoderResult(value=value, scalar=value,
                         encoding=self._topDownMappingM.getRow(binIndex))]
  ############################################################################
  def topDownCompute(self, encoded):
    """ See the function description in base.py """
    topDownMappingM = self._getTopDownMapping()
    # Pick the bucket whose canonical encoding overlaps `encoded` the most.
    binIndex = topDownMappingM.rightVecProd(encoded).argmax()
    value = numpy.mean(self.bins[binIndex, :])
    return [EncoderResult(value=value, scalar=value,
                         encoding=self._topDownMappingM.getRow(binIndex))]
  ############################################################################
  def dump(self):
    """Print the encoder's configuration for debugging."""
    print "NonUniformScalarEncoder:"
    print "  ranges: %r"% self.bins
    print "  w:   %d" % self.w
    print "  n:   %d" % self.n
    print "  resolution: %f" % self.resolution
    print "  radius:     %f" % self.radius
    print "  nInternal: %d" % self.nInternal
    print "  rangeInternal: %f" % self.rangeInternal
    print "  padding: %d" % self.padding
############################################################################
def testNonUniformScalarEncoder():
  """Self-test: exercises NonUniformScalarEncoder on uniform, skewed,
  weighted and explicitly-binned distributions, plus top-down decoding."""
  import numpy.random
  print "Testing NonUniformScalarEncoder..."
  def testEncoding(value, expected, encoder):
    # Encode `value` and compare against the expected bit pattern; on a
    # mismatch, dump the encoder's bins to ease debugging, then re-raise.
    observed = None
    expected = numpy.array(expected, dtype=defaultDtype)
    try:
      observed = encoder.encode(value)
      assert(observed == expected).all()
    except :
      #print "Encoding Error: encoding value %f \
      #\nexpected %s. got %s "% (value, str(expected), str(observed))
      print "Encoder Bins:\n%s"% encoder.bins
      raise Exception("Encoding Error: encoding value %f \
                      expected %s\n got %s "%
                      (value, str(expected), str(observed)))
  # TODO: test parent class methods
  #
  # -----------------------------------------
  # Start with simple uniform case:
  print "\t*Testing uniform distribution*"
  data = numpy.linspace( 1, 10, 10, endpoint = True)
  enc = NonUniformScalarEncoder(w=7,n=16, data=data, verbosity=3)
  expectedEncoding = numpy.zeros(16)
  expectedEncoding[:7] = 1
  # With a uniform distribution every value shifts the active bits by one.
  for i in range(1,10):
    testEncoding(i, expectedEncoding, enc)
    expectedEncoding = numpy.roll(expectedEncoding, 1)
  testEncoding(10, [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0], enc)
  del enc
  ## -----------------------------------------
  # Make sure the encoder works with a larger set of
  # bins and skewed distributions
  print "\t*Testing skewed distribution*"
  data = numpy.linspace(0, 10, 100)
  data = numpy.append(data, numpy.linspace(10,20,200))
  # Shuffle the data so that the order doesn't matter
  numpy.random.shuffle(data)
  enc = NonUniformScalarEncoder(w = 7, n=9, data=data)
  testEncoding(5, [1,1,1,1,1,1,1,0,0], enc)
  testEncoding(9, [1,1,1,1,1,1,1,0,0], enc)
  testEncoding(10, [0,1,1,1,1,1,1,1,0], enc)
  testEncoding(14.9, [0,1,1,1,1,1,1,1,0], enc)
  testEncoding(15, [0,0,1,1,1,1,1,1,1], enc)
  testEncoding(19, [0,0,1,1,1,1,1,1,1], enc)
  del enc
  ## -----------------------------------------
  ## Make sure the encoder works with non-uniform weights
  ## bins and very skewed distributions
  print "\t*Testing weighted distribution*"
  data = numpy.linspace(0, 10, 100)
  weights= 4 * numpy.ones_like(data)
  data = numpy.append(data, numpy.linspace(10,20,200))
  weights = numpy.append(weights, numpy.ones(200))
  enc = NonUniformScalarEncoder(w = 7, n=9, data=data, weights=weights)
  testEncoding(3, [1,1,1,1,1,1,1,0,0], enc)
  testEncoding(5, [0,1,1,1,1,1,1,1,0], enc)
  testEncoding(9, [0,1,1,1,1,1,1,1,0], enc)
  testEncoding(10, [0,0,1,1,1,1,1,1,1], enc)
  testEncoding(15, [0,0,1,1,1,1,1,1,1], enc)
  del enc
  #
  ## -----------------------------------------
  ## Stress test: make sure that ranges still
  ## make sense if there are a lot of bins
  print "\t*Stress Test*"
  data = numpy.concatenate([numpy.repeat(10, 30),
                            numpy.repeat(5, 20),
                            numpy.repeat(20, 35)])
  enc = NonUniformScalarEncoder(w=7, n=100, data=data, verbosity=2)
  result = numpy.zeros(100, dtype=defaultDtype)
  result[0:7] = 1
  testEncoding(5, result, enc)
  ## Now test a very discontinuous distribution
  #TODO: Not really sure what should happen here
  #data = 10 * numpy.ones(500)
  #data[250:] *= 2
  #enc = NonUniformScalarEncoder(w =3, n=4, data=data, verbosity = 2)
  #
  #assert enc.resolution == 1.0
  #assert enc._numBins == 2
  ##assert(enc.bins == numpy.array([[0,10.0], [10.0, 20.0]])).all()
  #
  #testEncoding(-1, [1,1,1,0], enc)
  #testEncoding(5, [1,1,1,0], enc)
  #testEncoding(10, [0,1,1,1], enc)
  #testEncoding(15, [0,1,1,1], enc)
  #testEncoding(25, [0,1,1,1], enc)
  #del enc
  #
  ## -----------------------------------------
  ## Now a case similar to the first, but with the proportions slightly uneven
  ## TODO: What should actually happen here ?
  #print "\t*Testing uneven distribution*"
  #data = 10 * numpy.ones(500)
  #data[248:] *= 2
  #enc = NonUniformScalarEncoder(w =3, n=4, data=data, verbosity = 0)
  #testEncoding(9, [1,1,1,0], enc)
  #testEncoding(10, [1,1,1,0], enc)
  #testEncoding(20, [0,1,1,1], enc)
  #del enc
  ## -----------------------------------------
  ## Test top-down decoding
  print "\t*Testing top-down decoding*"
  data = numpy.random.random_sample(400)
  enc = NonUniformScalarEncoder(w=7, n=9, data=data, verbosity=3)
  print enc.dump()
  # Each canonical encoding must decode to a value within its own bin.
  output = numpy.array([1,1,1,1,1,1,1,0,0], dtype=defaultDtype)
  for i in xrange(enc.n - enc.w + 1):
    topdown = enc.topDownCompute(output)
    bin = enc.bins[i,:]
    assert topdown[0].value >= bin[0] and topdown[0].value < bin[1]
    output = numpy.roll(output, 1)
  print "\t*Test TD decoding with explicit bins*"
  bins = [[ 0. , 199.7 ],
          [ 199.7, 203.1 ],
          [ 203.1, 207.655],
          [ 207.655, 212.18 ],
          [ 212.18, 214.118],
          [ 214.118, 216.956],
          [ 216.956, 219.133]]
  enc = NonUniformScalarEncoder(w=7, n=13, bins=bins)
  # -----------------------------------------
  # Test TD compute on
  tdOutput = numpy.array([ 0.0, 0.0, 0.0, 0.0, 0.40000001, 1.0,
                           1.0, 1.0, 1.0, 1.0, 1.0, 0.60000002,
                           0.60000002])
  enc.topDownCompute(tdOutput)
  topdown = enc.topDownCompute(tdOutput)
  testEncoding(topdown[0].value, [0,0,0,0,0,1,1,1,1,1,1,1,0], enc)
  # -----------------------------------------
  print "\t*Test TD decoding with non-contiguous ranges*"
  tdOutput = numpy.array([ 1.0, 1.0, 1.0, 0.0, 0.0, 0.0,
                           1.0, 1.0, 1.0, 1.0, 1.0, 0.60000002,
                           0.60000002])
  topdown = enc.topDownCompute(tdOutput)
  testEncoding(topdown[0].value, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,1], enc)
  print "passed"
############################################################################
# Run the self-test when executed directly.
if __name__ == '__main__':
  testNonUniformScalarEncoder() | tkaitchuck/nupic | py/nupic/encoders/nonuniformscalar.py | Python | gpl-3.0 | 15,766 |
# This file is part of PyEMMA.
#
# Copyright (c) 2017 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import numpy as _np
from pyemma.coordinates import assign_to_centers as _assign_to_centers
__all__ = [
'DoubleWellSampler']
class DoubleWellSampler(object):
    '''Continuous multi-ensemble MCMC process in an asymmetric double well potential'''
    def __init__(self):
        # Domain boundaries and the barrier position of the double well.
        self.xmin = -1.8
        self.xmid = 0.127
        self.xmax = 1.7
        # Maximum size of a (symmetric, uniform) MCMC trial displacement.
        self.step = 0.6
        # Number of discretization bins used for dtraj assignment.
        self.nstates = 50
        self.stride = None
        # Bin centers, stationary distribution and free energy at kT=1.
        edges = _np.linspace(self.xmin, self.xmax, self.nstates + 1)
        self.x = 0.5 * (edges[1:] + edges[:-1])
        epot = self.potential(self.x)
        self.pi = _np.exp(-epot)
        self.pi[:] = self.pi / self.pi.sum()
        self.f = -_np.log(self.pi)
    @property
    def centers(self):
        # Column-vector view of the bin centers, as expected by
        # pyemma's assign_to_centers.
        return self.x.reshape(-1, 1)
    def _potential(self, x):
        # Quartic double well x*(0.5 + x*(x^2 - 2)); +inf outside [xmin, xmax].
        # The try/except lets scalars and iterables share one implementation:
        # iterating a scalar raises TypeError and selects the scalar branch.
        try:
            return _np.asarray(tuple(map(self._potential, x)))
        except TypeError:
            if x < self.xmin or x > self.xmax:
                return _np.inf
            return x * (0.5 + x * (x * x - 2.0))
    def _bias(self, x, kbias, xbias):
        # Harmonic umbrella bias 0.5*k*(x - x0)^2. When kbias/xbias is None,
        # the arithmetic raises TypeError and the bias is zero.
        try:
            return 0.5 * kbias * (_np.asarray(x) - xbias)**2
        except TypeError:
            return 0.0
    def potential(self, x, kt=1.0, kbias=None, xbias=None):
        """Reduced (optionally biased) potential energy at x, scaled by 1/kt."""
        return (self._potential(x) + self._bias(x, kbias, xbias)) / kt
    def mcmc(self, xinit, length, kt=1.0, kbias=None, xbias=None, stride=None):
        """Run a Metropolis MC walk of `length` steps starting at xinit.

        Returns (position trajectory, energy trajectory), optionally strided.
        """
        xtraj = _np.zeros(shape=(length + 1,), dtype=_np.float64)
        etraj = _np.zeros(shape=(length + 1,), dtype=_np.float64)
        xtraj[0] = xinit
        etraj[0] = self.potential(xinit, kt=kt, kbias=kbias, xbias=xbias)
        for i in range(length):
            # Symmetric uniform proposal; Metropolis acceptance. Note that
            # the short-circuit `or` only draws a second random number when
            # the move is uphill.
            x_candidate = xtraj[i] + self.step * (_np.random.rand() - 0.5)
            e_candidate = self.potential(x_candidate, kt=kt, kbias=kbias, xbias=xbias)
            if e_candidate < etraj[i] or _np.random.rand() < _np.exp(etraj[i] - e_candidate):
                xtraj[i + 1] = x_candidate
                etraj[i + 1] = e_candidate
            else:
                xtraj[i + 1] = xtraj[i]
                etraj[i + 1] = etraj[i]
        if stride is not None and stride > 1:
            xtraj, etraj = _np.ascontiguousarray(xtraj[::stride]), _np.ascontiguousarray(etraj[::stride])
        return xtraj, etraj
    def _draw(self, xinit=None, right=False, weighted=True):
        # Choose a starting point: the given xinit, a uniform draw from the
        # right-hand well, or a draw from the bin centers (optionally
        # weighted by the stationary distribution).
        # NOTE(review): numpy.random.choice with size=1 returns a 1-element
        # array rather than a scalar; assigning it into xtraj[0] in mcmc()
        # relies on NumPy accepting that -- confirm with newer NumPy versions.
        if xinit is None:
            if right:
                pad = 0.2
                return _np.random.rand() * (self.xmax - self.xmid - 2.0 * pad) + self.xmid + pad
            return _np.random.choice(self.x, size=1, p=self.pi if weighted is True else None)
        return xinit
    def sample(self, ntraj=1, xinit=None, length=10000):
        """Run ntraj unbiased walks and discretize them onto the bin centers."""
        trajs = [self.mcmc(
            self._draw(xinit), length=length, stride=self.stride)[0] for i in range(ntraj)]
        return dict(
            trajs=trajs,
            dtrajs=_assign_to_centers(trajs, centers=self.centers))
    def us_sample(self, ntherm=11, us_fc=20.0, us_length=500, md_length=1000, nmd=20):
        """Generate umbrella-sampling windows plus unbiased trajectories."""
        # Umbrella centers are midpoints of an even grid over the domain
        # (shrunk by 0.2 on both sides); all windows share one force constant.
        xbias = _np.linspace(self.xmin + 0.2, self.xmax - 0.2, ntherm + 1)
        xbias = (0.5 * (xbias[1:] + xbias[:-1])).tolist()
        kbias = [us_fc] * len(xbias)
        us_trajs = [self.mcmc(
            x, us_length, kbias=k, xbias=x, stride=self.stride)[0] for k, x in zip(kbias, xbias)]
        md_trajs = [self.mcmc(
            self._draw(right=True), md_length, stride=self.stride)[0] for i in range(nmd)]
        dtrajs = _assign_to_centers(us_trajs + md_trajs, centers=self.centers)
        return dict(
            us_trajs=us_trajs,
            us_dtrajs=dtrajs[:ntherm],
            us_centers=xbias,
            us_force_constants=kbias,
            md_trajs=md_trajs,
            md_dtrajs=dtrajs[ntherm:])
    def mt_sample(self, kt0=1.0, kt1=5.0, length0=10000, length1=10000, n0=10, n1=10):
        """Generate a multi-temperature ensemble: n0 walks at kt0 started in
        the right well plus n1 walks at kt1 started from unweighted centers."""
        trajs = []
        utrajs = []
        ttrajs = []
        for i in range(n0):
            x, u = self.mcmc(
                self._draw(right=True), length0, kt=kt0, stride=self.stride)
            trajs.append(x)
            utrajs.append(u)
            ttrajs.append(_np.asarray([kt0] * x.shape[0]))
        for i in range(n1):
            x, u = self.mcmc(
                self._draw(weighted=False), length1, kt=kt1, stride=self.stride)
            trajs.append(x)
            utrajs.append(u)
            ttrajs.append(_np.asarray([kt1] * x.shape[0]))
        return dict(
            trajs=trajs,
            energy_trajs=utrajs,
            temp_trajs=ttrajs,
            dtrajs=_assign_to_centers(trajs, centers=self.centers))
| marscher/PyEMMA | pyemma/datasets/double_well_thermo.py | Python | lgpl-3.0 | 5,386 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import cStringIO
import json
import optparse
import tempfile
from cairosvg import svg2png
from flask import Flask
from flask import request
import numpy as np
from PIL import Image
from rdp import rdp
import svgwrite
import tensorflow as tf
from magenta.models.sketch_rnn.sketch_rnn_train import *
from magenta.models.sketch_rnn.model import *
from magenta.models.sketch_rnn.utils import *
from magenta.models.sketch_rnn.rnn import *
app = Flask(__name__)
def draw_strokes(data, factor=0.12):
  """Render a stroke-3 sequence to a base64-encoded PNG data URI.

  `data` rows are (dx, dy, pen_lifted); `factor` scales the coordinates.
  The drawing is written as an SVG, converted to PNG with cairosvg, and
  base64-encoded for direct embedding in a web page.
  """
  with tempfile.NamedTemporaryFile() as temp_svg:
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    # 25px of padding on every side.
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(temp_svg.name, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    lift_pen = 1
    abs_x, abs_y = 25 - min_x, 25 - min_y
    p = 'M%s,%s ' % (abs_x, abs_y)
    # SVG path commands: 'm' = relative move (pen was lifted), 'l' = relative
    # line-to; after the first 'l', the command letter can be omitted.
    command = 'm'
    for i in xrange(len(data)):
      if lift_pen == 1:
        command = 'm'
      elif command != 'l':
        command = 'l'
      else:
        command = ''
      x, y = float(data[i, 0])/factor, float(data[i, 1])/factor
      lift_pen = data[i, 2]
      p += command+str(x)+','+str(y)+' '
    the_color = 'black'
    stroke_width = 1
    dwg.add(dwg.path(p).stroke(the_color, stroke_width).fill('none'))
    dwg.save()
    temp_svg.flush()
    with tempfile.NamedTemporaryFile() as temp_png:
      svg2png(url=temp_svg.name, write_to=temp_png.name)
      image = Image.open(temp_png.name)
      image_buffer = cStringIO.StringIO()
      image.save(image_buffer, format='PNG')
      imgstr = 'data:image/png;base64,{:s}'.format(
          base64.b64encode(image_buffer.getvalue()))
  return imgstr
class SketchGenerator(object):
  """Wraps a trained sketch-rnn model: encodes a drawing into a latent
  vector z and decodes z back into newly sampled drawings."""
  def __init__(self):
    # Build training/eval/sampling graphs from the saved hyperparameters and
    # restore the latest checkpoint into an interactive session.
    [hps_model, eval_hps_model, sample_hps_model] = load_model(MODEL_DIR)
    _ = Model(hps_model)
    eval_model = Model(eval_hps_model, reuse=True)
    sample_model = Model(sample_hps_model, reuse=True)
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    load_checkpoint(sess, MODEL_DIR)
    self.sess = sess
    self.eval_model = eval_model
    self.sample_model = sample_model
  def encode(self, image):
    """Convert a flat [x, y, pen, ...] triplet list into stroke format, run
    the encoder, and return (rendered PNG data URI, latent vector z)."""
    def _compress_stroke(strokes):
      # Simplify one pen-down stroke with RDP, rescale by 1/10, and mark the
      # final point as end-of-stroke.
      result = [[x/10.0, y/10.0, 0] for [x, y] in rdp(strokes, epsilon=1.0)]
      result[-1][2] = 1
      return result
    result = []
    strokes = []
    # Split the flat triplet list into individual strokes at pen lifts.
    for i in range(0, len(image), 3):
      x, y, pen_down = image[i], image[i+1], image[i+2]
      if pen_down == 0:
        if strokes:
          result += _compress_stroke(strokes)
        strokes = [[x, y]]
      else:
        strokes.append([x, y])
    if strokes:
      result += _compress_stroke(strokes)
    # Convert absolute coordinates to per-step offsets.
    pre_x, pre_y = 0, 0
    for i, (x, y, pen) in enumerate(result):
      result[i] = [x - pre_x, y - pre_y, pen]
      pre_x, pre_y = x, y
    # Pad to stroke-5 format and clamp to the model's maximum sequence length.
    strokes = to_big_strokes(np.array(result)).tolist()
    strokes = strokes[:MAX_SEQ_LEN+1]
    seq_len = [min(MAX_SEQ_LEN, len(result))]
    z = self.sess.run(self.eval_model.batch_z,
                      feed_dict={self.eval_model.input_data: [strokes],
                                 self.eval_model.sequence_lengths: seq_len})[0]
    return draw_strokes(to_normal_strokes(np.array(strokes))), z
  def decode(self, z_input, temperature=0.1):
    """Sample a new drawing from latent vector z and render it to a PNG data
    URI; higher temperature yields more variable sketches."""
    z = [z_input]
    sample_strokes, _ = sample(self.sess, self.sample_model,
                               seq_len=self.eval_model.hps.max_seq_len,
                               temperature=temperature, z=z)
    strokes = to_normal_strokes(sample_strokes)
    return draw_strokes(strokes)
@app.route('/post', methods=['POST'])
def post():
  """Handle a drawing POSTed as JSON: return the re-rendered original plus
  three decoded variants at increasing sampling temperature."""
  strokes = request.get_json()
  original, z = client.encode(strokes)
  kid1 = client.decode(z, temperature=0.1)
  kid2 = client.decode(z, temperature=0.5)
  kid3 = client.decode(z, temperature=0.9)
  result = json.dumps({'kid1': kid1, 'kid2': kid2, 'kid3': kid3,
                       'original': original})
  return result
if __name__ == '__main__':
  # Parse the serving port and model directory from the command line.
  parser = optparse.OptionParser()
  parser.add_option(
      '-p', '--port',
      type='int',
      dest='port',
      default=8081)
  parser.add_option(
      '-d', '--dir',
      type='string',
      dest='model_dir',
      default='/opt/sketch_demo/models/catbus/lstm')
  options, _ = parser.parse_args()
  port = options.port
  MODEL_DIR = options.model_dir
  # max_seq_len must match the value the model was trained with.
  with open(MODEL_DIR + '/model_config.json') as config:
    MAX_SEQ_LEN = json.load(config)['max_seq_len']
  # Load the model once at startup, then serve on localhost only.
  client = SketchGenerator()
  app.run(host='127.0.0.1', port=port, debug=False)
| GoogleCloudPlatform/tensorflow-sketch-rnn-example | sketch_demo/backend.py | Python | apache-2.0 | 5,176 |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
from random import randint
class BackoffStrategy:
    """Strategy interface: compute the delay to wait before a retry attempt."""

    def delay(self, attempt):
        """Return the delay for the given retry attempt (abstract)."""
        raise NotImplementedError("BackoffStrategy::delay is abstract!")
class ConstantBackoff(BackoffStrategy):
    """Backoff policy that waits the same fixed delay on every attempt."""

    def __init__(self, base_delay):
        # Fixed delay returned for every attempt; subclasses reuse it as the
        # unit slot time.
        self.base_delay = base_delay

    def delay(self, attempts):
        """Return the constant base delay, regardless of attempt count."""
        return self.base_delay
class ExponentialBackoff(ConstantBackoff):
    """Binary exponential backoff: for attempt k > 0, a slot is drawn
    uniformly from [0, 2**k - 1] and multiplied by the base delay."""

    def __init__(self, base_delay):
        super().__init__(base_delay)

    def delay(self, attempts):
        """Return a randomized delay for the given number of attempts."""
        if attempts == 0:
            # First try: no waiting at all.
            return 0
        else:
            # Largest slot index for this attempt: 2**attempts - 1.
            limit = 2 ** attempts - 1
            return self._pick_up_to(limit) * self.base_delay

    @staticmethod
    def _pick_up_to(limit):
        # randint is inclusive on both ends, so 0 and `limit` can be drawn.
        return randint(0, limit) | fchauvel/MAD | mad/simulation/backoff.py | Python | gpl-3.0 | 1,390 |
# Time: O(n * S)
# Space: O(S)
# You are given a list of non-negative integers, a1, a2, ..., an,
# and a target, S. Now you have 2 symbols + and -.
# For each integer, you should choose one from + and - as its new symbol.
#
# Find out how many ways to assign symbols to make sum of integers equal to target S.
#
# Example 1:
# Input: nums is [1, 1, 1, 1, 1], S is 3.
# Output: 5
# Explanation:
#
# -1+1+1+1+1 = 3
# +1-1+1+1+1 = 3
# +1+1-1+1+1 = 3
# +1+1+1-1+1 = 3
# +1+1+1+1-1 = 3
#
# There are 5 ways to assign symbols to make the sum of nums be target 3.
# Note:
# The length of the given array is positive and will not exceed 20.
# The sum of elements in the given array will not exceed 1000.
# Your output answer is guaranteed to be fitted in a 32-bit integer.
import collections
class Solution(object):
    def findTargetSumWays(self, nums, S):
        """
        :type nums: List[int]
        :type S: int
        :rtype: int

        Count the ways to prefix each number with '+' or '-' so that the
        expression sums to S.

        Splitting nums into a '+' subset P and a '-' subset N gives
        sum(P) - sum(N) = S and sum(P) + sum(N) = total, hence
        sum(P) = (S + total) / 2. The answer is therefore the number of
        subsets summing to (S + total) / 2, computed with a classic 0/1
        subset-sum DP in O(n * S) time and O(S) space.

        Note: `xrange` was replaced with `range` so the solution runs under
        both Python 2 and Python 3; behavior is unchanged.
        """
        def subsetSum(nums, target):
            # dp[s] = number of subsets seen so far that sum to s.
            dp = collections.defaultdict(int)
            dp[0] = 1
            for n in nums:
                # Iterate downwards so each number is used at most once.
                for i in reversed(range(n, target + 1)):
                    if i - n in dp:
                        dp[i] += dp[i - n]
            return dp[target]

        total = sum(nums)
        # No assignment works if S is out of reach or (S + total) is odd.
        if total < S or (S + total) % 2:
            return 0
        return subsetSum(nums, (S + total) // 2)
| tudennis/LeetCode---kamyu104-11-24-2015 | Python/target-sum.py | Python | mit | 1,368 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto
from boto.ec2.regioninfo import RegionInfo
import commands
import httplib
import os
import paramiko
import sys
import time
import unittest
from smoketests import flags
SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
flags.DEFINE_integer('ssh_tries', 3, 'Numer of times to try ssh')
class SmokeTestCase(unittest.TestCase):
    """Base class for EC2-API smoke tests.

    Provides helpers for ssh access, polling instance state, building boto
    EC2 connections from the environment, and bundling/uploading images via
    the euca2ools command-line utilities.
    """

    def connect_ssh(self, ip, key_name):
        """Open an ssh connection to `ip` as root using /tmp/<key_name>.pem.

        Retries up to FLAGS.ssh_tries times on authentication/ssh errors,
        then lets the exception propagate.
        """
        key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
        tries = 0
        while True:
            try:
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.connect(ip, username='root', pkey=key, timeout=5)
                return client
            except (paramiko.AuthenticationException, paramiko.SSHException):
                tries += 1
                if tries == FLAGS.ssh_tries:
                    raise

    def can_ping(self, ip, command="ping"):
        """Attempt a single ping of `ip`, giving up after 1 second."""
        # NOTE(devcamcar): ping timeout flag is different in OSX.
        if sys.platform == 'darwin':
            timeout_flag = 't'
        else:
            timeout_flag = 'w'
        status, output = commands.getstatusoutput('%s -c1 -%s1 %s' %
                                                  (command, timeout_flag, ip))
        return status == 0

    def wait_for_running(self, instance, tries=60, wait=1):
        """Wait for instance to be running."""
        for x in range(tries):
            instance.update()
            if instance.state.startswith('running'):
                return True
            time.sleep(wait)
        return False

    def wait_for_deleted(self, instance, tries=60, wait=1):
        """Wait for instance to be deleted."""
        for x in range(tries):
            try:
                # NOTE(dprince): raises exception when instance id disappears
                instance.update(validate=True)
            except ValueError:
                return True
            time.sleep(wait)
        return False

    def wait_for_ping(self, ip, command="ping", tries=120):
        """Wait for ip to be pingable.

        Bug fix: previously the `else: return False` was attached to the
        inner `if`, so the method gave up after the FIRST failed ping and
        `tries` was never used. Now every attempt is consumed; no explicit
        sleep is needed because each can_ping() call already waits up to
        one second.
        """
        for x in range(tries):
            if self.can_ping(ip, command):
                return True
        return False

    def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
        """Wait for ip to be sshable."""
        for x in range(tries):
            try:
                conn = self.connect_ssh(ip, key_name)
                conn.close()
            except Exception:
                time.sleep(wait)
            else:
                return True
        return False

    def connection_for_env(self, **kwargs):
        """
        Returns a boto ec2 connection for the current environment.
        """
        access_key = os.getenv('EC2_ACCESS_KEY')
        secret_key = os.getenv('EC2_SECRET_KEY')
        clc_url = os.getenv('EC2_URL')
        if not access_key or not secret_key or not clc_url:
            raise Exception('Missing EC2 environment variables. Please source '
                            'the appropriate novarc file before running this '
                            'test.')
        parts = self.split_clc_url(clc_url)
        # Both the IPv4 and IPv6 flavors take identical arguments, so pick
        # the connect function once instead of duplicating the call.
        if FLAGS.use_ipv6:
            connect = boto_v6.connect_ec2
        else:
            connect = boto.connect_ec2
        return connect(aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       is_secure=parts['is_secure'],
                       region=RegionInfo(None, 'nova', parts['ip']),
                       port=parts['port'],
                       path='/services/Cloud',
                       **kwargs)

    def split_clc_url(self, clc_url):
        """
        Splits a cloud controller endpoint url into ip/port/is_secure.
        """
        # NOTE(review): httplib only exposes urlsplit because it imports it
        # from urlparse internally; also assumes the url always carries an
        # explicit port -- confirm both hold for all deployments.
        parts = httplib.urlsplit(clc_url)
        is_secure = parts.scheme == 'https'
        ip, port = parts.netloc.split(':')
        return {'ip': ip, 'port': int(port), 'is_secure': is_secure}

    def create_key_pair(self, conn, key_name):
        """Create a fresh keypair, saving the private key under /tmp."""
        # Best effort: remove any stale private key from a previous run.
        try:
            os.remove('/tmp/%s.pem' % key_name)
        except Exception:
            pass
        key = conn.create_key_pair(key_name)
        key.save('/tmp/')
        return key

    def delete_key_pair(self, conn, key_name):
        """Delete the keypair and its private key file (best effort)."""
        conn.delete_key_pair(key_name)
        try:
            os.remove('/tmp/%s.pem' % key_name)
        except Exception:
            pass

    def bundle_image(self, image, tempdir='/tmp', kernel=False):
        """Bundle an image with euca-bundle-image; raises on failure."""
        cmd = 'euca-bundle-image -i %s -d %s' % (image, tempdir)
        if kernel:
            cmd += ' --kernel true'
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            print('%s -> \n %s' % (cmd, output))
            raise Exception(output)
        return True

    def upload_image(self, bucket_name, image, tempdir='/tmp'):
        """Upload a bundled image with euca-upload-bundle; raises on failure."""
        cmd = 'euca-upload-bundle -b '
        cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            print('%s -> \n %s' % (cmd, output))
            raise Exception(output)
        return True

    def delete_bundle_bucket(self, bucket_name):
        """Clear and delete the bundle bucket; raises on failure."""
        cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            print('%s -> \n%s' % (cmd, output))
            raise Exception(output)
        return True
# Scratch dictionary shared between the test cases in this module.
TEST_DATA = {}
if FLAGS.use_ipv6:
    global boto_v6
    # Only import the IPv6-capable boto wrapper when it is actually needed.
    boto_v6 = __import__('boto_v6')
class UserSmokeTestCase(SmokeTestCase):
    """Smoke test base class that runs as a regular (non-admin) user."""

    def setUp(self):
        # `global` is redundant for a read, kept from the original; TEST_DATA
        # is the module-level dictionary shared across test cases.
        global TEST_DATA
        self.conn = self.connection_for_env()
        self.data = TEST_DATA
| fajoy/nova | smoketests/base.py | Python | apache-2.0 | 7,286 |
#!/usr/bin/env python
import os
from migrate.versioning.shell import main
import settings
# Database connection string consumed by sqlalchemy-migrate.
db_url = settings.DATABASE_URL
# Locate this app relative to the current working directory so the
# migration repository resolves no matter where the script is invoked from.
app_path = os.path.relpath(os.path.dirname(os.path.abspath(__file__)),
                           os.getcwd())
repository = os.path.join(app_path, 'migrations')
if __name__ == '__main__':
    # Hand control to sqlalchemy-migrate's versioning shell (CLI).
    main(url=db_url, debug='True', repository=repository)
| nooodle/noodlefeed | noodlefeed/migration.py | Python | bsd-3-clause | 372 |
import contextlib
import logging
import os
import pathlib
import sys
import tempfile
import pytest
from _pytest.logging import LogCaptureFixture
import rasa.cli.utils
@contextlib.contextmanager
def make_actions_subdir():
    """Temporarily switch into a fresh directory containing an ``actions`` subdir.

    Used to exercise CLI model-argument handling, which special-cases a local
    ``actions`` directory. The previous working directory is restored on exit.
    """
    with tempfile.TemporaryDirectory() as tempdir:
        previous_dir = os.getcwd()
        os.chdir(tempdir)
        try:
            pathlib.Path(tempdir, "actions").mkdir()
            yield
        finally:
            os.chdir(previous_dir)
@pytest.mark.parametrize(
    "argv",
    [
        ["rasa", "run"],
        ["rasa", "run", "actions"],
        ["rasa", "run", "core"],
        ["rasa", "interactive", "nlu", "--param", "xy"],
    ],
)
def test_parse_last_positional_argument_as_model_path(argv):
    """A trailing positional path is rewritten into an explicit --model flag."""
    with make_actions_subdir():
        model_dir = tempfile.gettempdir()
        sys.argv = argv + [model_dir]
        rasa.cli.utils.parse_last_positional_argument_as_model_path()
        assert sys.argv[-2:] == ["--model", model_dir]
@pytest.mark.parametrize(
    "argv",
    [
        ["rasa", "run"],
        ["rasa", "run", "actions"],
        ["rasa", "run", "core"],
        ["rasa", "test", "nlu", "--param", "xy", "--model", "test"],
    ],
)
def test_parse_no_positional_model_path_argument(argv):
    """argv without a trailing positional path must stay untouched."""
    with make_actions_subdir():
        sys.argv = list(argv)
        rasa.cli.utils.parse_last_positional_argument_as_model_path()
        assert sys.argv == argv
def test_validate_invalid_path():
    """get_validated_path exits when given a path that does not exist."""
    bogus_path = "test test test"
    with pytest.raises(SystemExit):
        rasa.cli.utils.get_validated_path(bogus_path, "out", "default")
def test_validate_valid_path(tmp_path: pathlib.Path):
    """An existing path is returned unchanged."""
    existing = str(tmp_path)
    assert rasa.cli.utils.get_validated_path(existing, "out", "default") == existing
def test_validate_if_none_is_valid():
    """None passes straight through when none_is_valid=True."""
    result = rasa.cli.utils.get_validated_path(None, "out", "default", True)
    assert result is None
def test_validate_with_none_if_default_is_valid(
    caplog: LogCaptureFixture, tmp_path: pathlib.Path
):
    """Falling back to an existing default must not emit a warning log."""
    fallback = str(tmp_path)
    with caplog.at_level(logging.WARNING, rasa.cli.utils.logger.name):
        assert rasa.cli.utils.get_validated_path(None, "out", fallback) == fallback
    assert not caplog.records
def test_validate_with_invalid_directory_if_default_is_valid(tmp_path: pathlib.Path):
    """A nonexistent path falls back to the default and warns exactly once."""
    missing = "gcfhvjkb"
    fallback = str(tmp_path)
    with pytest.warns(UserWarning) as seen_warnings:
        assert rasa.cli.utils.get_validated_path(missing, "out", fallback) == fallback
    assert len(seen_warnings) == 1
    assert "does not seem to exist" in seen_warnings[0].message.args[0]
| RasaHQ/rasa_nlu | tests/cli/test_utils.py | Python | apache-2.0 | 2,701 |
from ..error import DataConsistencyError
from ..objects.order_line import OrderLine
from .base import ResourceBase
class OrderLines(ResourceBase):
    """Resource handler for the nested ``orders/<id>/lines`` endpoint."""

    # Id of the parent order; must be bound (via with_parent_id/on)
    # before any API call is made.
    order_id = None

    def get_resource_name(self):
        """Return the REST path for the lines of the bound order."""
        return f"orders/{self.order_id}/lines"

    def get_resource_object(self, result):
        """Wrap a raw API payload dict in an OrderLine object."""
        return OrderLine(result, self.client)

    def with_parent_id(self, order_id):
        """Bind this resource to an order id; returns self for chaining."""
        self.order_id = order_id
        return self

    def on(self, order):
        """Bind this resource to an Order object."""
        return self.with_parent_id(order.id)

    def delete(self, data, *args):
        """
        Custom handling for deleting orderlines.

        Orderlines are deleted with a single DELETE on the
        orders/*/lines endpoint; the orderline ids and quantities travel
        in the request body rather than in the URL.
        """
        return self.perform_api_call(
            self.REST_DELETE, self.get_resource_name(), data=data
        )

    def update(self, resource_id, data=None, **params):
        """
        Custom handling for updating orderlines.

        The API answers with the full Order object; since the request was
        issued through an orderline resource we pick the updated line out
        of the response and return that instead. Use the orderline's
        order_id if you need the enclosing order.
        """
        path = f"{self.get_resource_name()}/{resource_id}"
        response = self.perform_api_call(self.REST_UPDATE, path, data=data)
        for line_payload in response["lines"]:
            if line_payload["id"] == resource_id:
                return self.get_resource_object(line_payload)
        raise DataConsistencyError(f"Line id {resource_id} not found in response.")
| mollie/mollie-api-python | mollie/api/resources/order_lines.py | Python | bsd-2-clause | 1,701 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from edenscmnative.clindex import * # noqa: F401, F403
from edenscmnative.clindex import __doc__ # noqa: F401
| facebookexperimental/eden | eden/hg-server/edenscm/hgext/clindex.py | Python | gpl-2.0 | 280 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholders are substituted by `git archive` (export-subst).
    # setup.py/versioneer.py greps for the variable names, so each one
    # must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked in when 'setup.py versioneer' generated
    # _version.py for this project.
    cfg = VersioneerConfig()
    for attr_name, attr_value in (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", ""),
        ("parentdir_prefix", "shogun"),
        ("versionfile_source", "shogun/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr_name, attr_value)
    return cfg
# Control-flow exception: each version-discovery strategy raises this so
# get_versions() can fall through to the next strategy in its chain.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# Registry of embeddable long-form _version.py templates (unused here).
LONG_VERSION_PY = {}
# Two-level registry: HANDLERS[vcs][method] -> handler function.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that files a function under HANDLERS[vcs][method]."""
    def decorate(func):
        """Record *func* as the handler and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = func
        return func
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Run the first launchable command from *commands* with *args*.

    Returns (stdout, returncode); (None, None) when no candidate could be
    started, (None, returncode) when the process failed.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                # Candidate binary not installed; try the next spelling.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
        break
    else:
        # Every candidate was missing.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string; we also search up to two
    directory levels above *root* for such a name. Raises NotThisMethod
    when nothing matches.
    """
    tried = []
    candidate = root
    for _ in range(3):
        leaf = os.path.basename(candidate)
        if leaf.startswith(parentdir_prefix):
            # Whatever follows the prefix is taken as the version string.
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(candidate)
        candidate = os.path.dirname(candidate)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scrapes the ``git_refnames``/``git_full``/``git_date`` assignments out
    of *versionfile_abs* with regexps so that setup.py never has to import
    _version.py (this function is not used from _version.py itself).
    Returns a possibly-empty dict; an unreadable file yields no keywords.
    """
    keywords = {}
    try:
        # `with` guarantees the handle is closed even if a read fails
        # mid-file (the original leaked the descriptor in that case).
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file simply means "no keywords available".
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by get_keywords()/git_get_keywords().
    Returns a version-info dict, or raises NotThisMethod when the $Format$
    placeholders were never expanded (i.e. this is not a git-archive
    tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    # After expansion, refnames looks like "(HEAD -> master, tag: 1.0)".
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict (long/short hashes, closest-tag, distance,
    dirty flag, date, error). *run_command* is a parameter so tests can
    inject a fake runner.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version identifiers start with "+" and separate further
    segments with "."; this picks the right separator to append after the
    closest tag.
    """
    # `get` may legitimately return None (key present but unset); treat
    # that like a missing tag instead of raising TypeError on `in None`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing to anchor on, synthesize an epoch-zero tag
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        version = pieces["closest-tag"]
        if not (pieces["distance"] or pieces["dirty"]):
            # Exactly on a clean tag: the tag alone is the version.
            return version
        version += ".post%d" % pieces["distance"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An upstream step failed; report that instead of a fake version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Strategy chain: expanded git-archive keywords, then `git describe` on
    a checkout, then the parent-directory name. Each strategy signals
    "not applicable" by raising NotThisMethod.
    """
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        # We are in _version.py at ROOT/VERSIONFILE_SOURCE; walk up one
        # directory per path component to recover ROOT. Some
        # py2exe/bbfreeze/non-CPython implementations don't set __file__,
        # in which case only the expanded keywords could have worked.
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| knights-lab/NINJA-SHOGUN | shogun/_version.py | Python | mit | 18,449 |
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorator: if the result's ``failfast`` flag is set, request a stop
    of the run before delegating to the wrapped method."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'failfast', False):
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
# Templates used to append captured stdout/stderr to failure output when
# output buffering is enabled on a TestResult.
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Bookkeeping used by TestSuite for class/module fixtures.
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False
    def __init__(self, stream=None, descriptions=None, verbosity=None):
        # stream/descriptions/verbosity are accepted (and ignored) so
        # subclasses like TextTestResult can share this signature.
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When True, stdout/stderr are swapped for StringIO buffers around
        # each test and replayed only if the test fails/errors.
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        # Set when a test fails/errors so captured output gets replayed.
        self._mirrorOutput = False
    def printErrors(self):
        "Called by TestRunner after test run"
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()
    def _setupStdout(self):
        # Redirect stdout/stderr into private buffers (created lazily and
        # reused across tests) while buffering is enabled.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = io.StringIO()
                self._stdout_buffer = io.StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False
    def _restoreStdout(self):
        # Undo _setupStdout: replay captured output for failed tests,
        # restore the real streams, and reset the buffers for reuse.
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        # With buffering on, captured output is appended to the formatted
        # traceback so the failure message is self-contained.
        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # Frames from unittest's own modules set the __unittest global.
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        # Number of frames up to (excluding) unittest-internal ones.
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
| utluiz/utluiz.github.io | pyscript/Lib/unittest/result.py | Python | mit | 6,473 |
import itertools
import time
import climin
import climin.initialize
import climin.stops
from trainers.tangMlp import TangMlp, squared_hinge
__author__ = 'kosklain'
class NeuralTrainer(object):
    """Trains a TangMlp on the given data (Python 2 code; uses climin).

    X/Z are the training inputs/targets, VX/VZ the validation
    inputs/targets — presumably numpy arrays with samples in rows
    (TODO confirm against callers).
    """
    def __init__(self, X, VX, Z, VZ):
        self.X = X
        self.VX = VX
        self.Z = Z
        self.VZ = VZ
    def run(self):
        """Train the network, printing progress, and return the model."""
        max_passes = 400
        batch_size = 200
        # Total optimizer iterations for max_passes epochs of minibatches.
        max_iter = max_passes * self.X.shape[0] / batch_size
        # Report once per epoch (at least every iteration).
        n_report = max(self.X.shape[0] / batch_size, 1)
        # Input-noise level decays linearly from 1 to 0, then stays 0.
        noise_schedule = (1 - float(i) / max_iter for i in xrange(max_iter))
        noise_schedule = itertools.chain(noise_schedule, itertools.repeat(0))
        optimizer = 'rmsprop', {'steprate': 0.001, 'momentum': 0.9, 'decay': 0.9, 'step_adapt': 0.01}
        m = TangMlp(self.X.shape[1], [4000], 1, hidden_transfers=['sigmoid'],
                    out_transfer='identity', loss=squared_hinge, noise_schedule=noise_schedule,
                    optimizer=optimizer, batch_size=batch_size, max_iter=max_iter)
        climin.initialize.randomize_normal(m.parameters.data, 0, 0.02)
        m.parameters['out_bias'][...] = 0
        # L2 penalty on the output layer only, normalized by batch size.
        weight_decay = ((m.parameters.hidden_to_out ** 2).sum())
        weight_decay /= m.exprs['inpt'].shape[0]
        m.exprs['true_loss'] = m.exprs['loss']
        c_wd = 0.001
        m.exprs['loss'] = m.exprs['loss'] + c_wd * weight_decay
        f_wd = m.function(['inpt'], c_wd * weight_decay)
        # Mean absolute error, used as the empirical train/test metric.
        n_wrong = abs(m.exprs['output'] - m.exprs['target']).mean()
        f_n_wrong = m.function(['inpt', 'target'], n_wrong)
        losses = []
        v_losses = []
        print 'max iter', max_iter
        start = time.time()
        # Set up a nice printout.
        keys = '#', 'loss', 'val loss', 'seconds', 'wd', 'train emp', 'test emp'
        max_len = max(len(i) for i in keys)
        header = '\t'.join(i for i in keys)
        print header
        print '-' * len(header)
        stop = climin.stops.any_([
            climin.stops.after_n_iterations(max_iter),
        ])
        pause = climin.stops.modulo_n_iterations(n_report)
        for i, info in enumerate(m.powerfit((self.X, self.Z), (self.VX, self.VZ), stop, pause)):
            # powerfit pauses every n_report iterations; skip spurious wakeups.
            if info['n_iter'] % n_report != 0:
                continue
            passed = time.time() - start
            losses.append(info['loss'])
            v_losses.append(info['val_loss'])
            info.update({
                'time': passed,
                'l2-loss': f_wd(self.X),
                'train_emp': f_n_wrong(self.X, self.Z),
                'test_emp': f_n_wrong(self.VX, self.VZ),
            })
            row = '%(n_iter)i\t%(loss)g\t%(val_loss)g\t%(time)g\t%(l2-loss)g\t%(train_emp)g\t%(test_emp)g' % info
            print row
        return m
# Copyright (C) 2009-2015 Contributors as noted in the AUTHORS file
#
# This file is part of Autopilot.
#
# Autopilot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autopilot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autopilot. If not, see <http://www.gnu.org/licenses/>.
import sys
from appargs import ApplicationArguments
from lib.manuscript.manuscript import Manuscript
import lib.reporting.logger as logger
from lib.reporting.logger import Logger
from lib.app.bootstrap import start_app
from lib.instructions.base.instructions import Instructions
class TestEngine(object):
    """Drives a single Autopilot run from the parsed application arguments."""

    def __init__(self):
        self.appargs = ApplicationArguments()
        logger.set_debug(self.appargs.degbug())
        self.manuscript = Manuscript(
            self.appargs.manuscripts(),
            self.appargs.paths(),
            self.appargs.placeholders(),
        )
        if self.appargs.autoexit():
            self.manuscript.add_autoexit_instruction()

    def _log_effective_manuscript(self):
        """Dump the resolved manuscript to the log without executing it."""
        Logger.set_path(self.manuscript.get_log_path())
        Logger.add_section("Manuscript", u"%s" % self.manuscript)

    def _test(self):
        """Execute the manuscript's instructions against the target program."""
        Logger.set_path(self.manuscript.get_log_path())
        Logger.set_log_dialog_descriptions(self.appargs.log_dialog_descriptions())
        Logger.add_debug("Application arguments")
        Logger.add_debug(" ".join(sys.argv))
        Logger.add_section("Manuscript", u"%s" % self.manuscript)
        instructions = Instructions(self.manuscript, self.appargs.timedelay())
        start_app(instructions, self.appargs.program())

    def run(self):
        """Run (or merely inspect) the manuscript; True when no errors logged."""
        if self.appargs.investigate():
            self._log_effective_manuscript()
            return not Logger.has_errors()
        try:
            self._test()
        finally:
            # Flush the log even when the run blows up.
            Logger.log()
        return not Logger.has_errors()
if __name__ == '__main__':
    # Command-line entry point; the boolean success value is discarded here.
    TestEngine().run()
| rogerlindberg/autopilot | src/lib/app/testengine.py | Python | gpl-3.0 | 2,292 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import HTMLParser
import logging
import os
import re
import tempfile
import xml.etree.ElementTree
from devil.android import apk_helper
from pylib import constants
from pylib.constants import host_paths
from pylib.base import base_test_result
from pylib.base import test_instance
from pylib.utils import isolator
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import unittest_util # pylint: disable=import-error
# Suites that are browser tests (launched via an activity) rather than
# plain gtest binaries.
BROWSER_TEST_SUITES = [
    'components_browsertests',
    'content_browsertests',
]
# Suites whose tests must run on a sub-thread instead of the main thread.
RUN_IN_SUB_THREAD_TEST_SUITES = ['net_unittests']
# Used for filtering large data deps at a finer grain than what's allowed in
# isolate files since pushing deps to devices is expensive.
# Wildcards are allowed.
_DEPS_EXCLUSION_LIST = [
    'chrome/test/data/extensions/api_test',
    'chrome/test/data/extensions/secure_shell',
    'chrome/test/data/firefox*',
    'chrome/test/data/gpu',
    'chrome/test/data/image_decoding',
    'chrome/test/data/import',
    'chrome/test/data/page_cycler',
    'chrome/test/data/perf',
    'chrome/test/data/pyauto_private',
    'chrome/test/data/safari_import',
    'chrome/test/data/scroll',
    'chrome/test/data/third_party',
    'third_party/hunspell_dictionaries/*.dic',
    # crbug.com/258690
    'webkit/data/bmp_decoder',
    'webkit/data/ico_decoder',
]
# Intent-extra keys consumed by the on-device NativeTest instrumentation
# runner (names mirror the Java-side constants).
_EXTRA_NATIVE_TEST_ACTIVITY = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'NativeTestActivity')
_EXTRA_RUN_IN_SUB_THREAD = (
    'org.chromium.native_test.NativeTest.RunInSubThread')
EXTRA_SHARD_NANO_TIMEOUT = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'ShardNanoTimeout')
_EXTRA_SHARD_SIZE_LIMIT = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner.'
    'ShardSizeLimit')
# TODO(jbudorick): Remove these once we're no longer parsing stdout to generate
# results.
# Matches gtest status lines like "[ RUN      ] Suite.Test" and
# "[       OK ] Suite.Test (12 ms)".
_RE_TEST_STATUS = re.compile(
    r'\[ +((?:RUN)|(?:FAILED)|(?:OK)|(?:CRASHED)) +\]'
    r' ?([^ ]+)?(?: \((\d+) ms\))?$')
# Crash detection constants.
_RE_TEST_ERROR = re.compile(r'FAILURES!!! Tests run: \d+,'
                            r' Failures: \d+, Errors: 1')
_RE_TEST_CURRENTLY_RUNNING = re.compile(r'\[ERROR:.*?\]'
                                        r' Currently running: (.*)')
def ParseGTestListTests(raw_list):
  """Parses a raw test list as provided by --gtest_list_tests.

  Args:
    raw_list: The raw test listing. Suite names end with '.' and sit at
        column 0; test names are indented beneath their suite, e.g.:
          IPCChannelTest.
            SendMessageInChannelConnected
          IPCSyncChannelTest.
            Simple
            DISABLED_SendWithTimeoutMixedOKAndTimeout

  Returns:
    A list of fully-qualified test names, e.g.
    [IPCChannelTest.SendMessageInChannelConnected, IPCSyncChannelTest.Simple,
     IPCSyncChannelTest.DISABLED_SendWithTimeoutMixedOKAndTimeout]
  """
  tests = []
  current_suite = ''
  for line in raw_list:
    if not line:
      continue
    if line[0] == ' ':
      # Indented entry: a test inside the current suite, unless it is
      # gtest's trailing "YOU HAVE n DISABLED TESTS" notice.
      if 'YOU HAVE' not in line:
        tests.append(current_suite + line.split()[0])
    else:
      token = line.split()[0]
      if token.endswith('.'):
        current_suite = token
  return tests
def ParseGTestOutput(output):
  """Parses raw gtest output and returns a list of results.

  Args:
    output: A list of output lines.
  Returns:
    A list of base_test_result.BaseTestResults.
  """
  # Parser state for the test currently being scanned.
  duration = 0
  fallback_result_type = None
  log = []
  result_type = None
  results = []
  test_name = None
  def handle_possibly_unknown_test():
    # Flush a test that started (RUN) but never reported a terminal
    # status — e.g. the process died; fall back to CRASHED/UNKNOWN.
    if test_name is not None:
      results.append(base_test_result.BaseTestResult(
          test_name,
          fallback_result_type or base_test_result.ResultType.UNKNOWN,
          duration, log=('\n'.join(log) if log else '')))
  for l in output:
    logging.info(l)
    matcher = _RE_TEST_STATUS.match(l)
    if matcher:
      if matcher.group(1) == 'RUN':
        handle_possibly_unknown_test()
        duration = 0
        fallback_result_type = None
        log = []
        result_type = None
      elif matcher.group(1) == 'OK':
        result_type = base_test_result.ResultType.PASS
      elif matcher.group(1) == 'FAILED':
        result_type = base_test_result.ResultType.FAIL
      elif matcher.group(1) == 'CRASHED':
        fallback_result_type = base_test_result.ResultType.CRASH
      # Be aware that test name and status might not appear on same line.
      test_name = matcher.group(2) if matcher.group(2) else test_name
      duration = int(matcher.group(3)) if matcher.group(3) else 0
    else:
      # Needs another matcher here to match crashes, like those of DCHECK.
      matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
      if matcher:
        test_name = matcher.group(1)
        result_type = base_test_result.ResultType.CRASH
        duration = 0  # Don't know.
    if log is not None:
      log.append(l)
    if result_type and test_name:
      results.append(base_test_result.BaseTestResult(
          test_name, result_type, duration,
          log=('\n'.join(log) if log else '')))
      test_name = None
  # Flush a trailing test that never reached a terminal status.
  handle_possibly_unknown_test()
  return results
def ParseGTestXML(xml_content):
  """Parse gtest XML result."""
  html = HTMLParser.HTMLParser()
  # TODO(jbudorick): Unclear how this handles crashes.
  suites = xml.etree.ElementTree.fromstring(xml_content)
  results = []
  for suite in suites:
    for case in suite:
      # Any <failure> child marks the case as failed; collect their
      # (HTML-unescaped) messages as the log.
      messages = [html.unescape(child.attrib['message']) for child in case]
      if messages:
        status = base_test_result.ResultType.FAIL
      else:
        status = base_test_result.ResultType.PASS
      results.append(base_test_result.BaseTestResult(
          '%s.%s' % (suite.attrib['name'], case.attrib['name']),
          status,
          int(float(case.attrib['time']) * 1000),
          log='\n'.join(messages)))
  return results
def ConvertTestFilterFileIntoGTestFilterArgument(input_lines):
  """Converts test filter file contents into --gtest_filter argument.

  See //testing/buildbot/filters/README.md for description of the
  syntax that |input_lines| are expected to follow.

  See
  https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md#running-a-subset-of-the-tests
  for description of the syntax that --gtest_filter argument should follow.

  Args:
    input_lines: An iterable (e.g. a list or a file) containing input lines.
  Returns:
    a string suitable for feeding as an argument of --gtest_filter parameter.
  """
  # Trim whitespace, then drop blank lines and '#' comments.
  trimmed = [line.strip() for line in input_lines]
  patterns = [p for p in trimmed if p and not p.startswith('#')]
  # gtest treats every pattern after the first '-' sign as an exclusion,
  # so positive patterns must come first, exclusions after a single '-'.
  includes = [p for p in patterns if not p.startswith('-')]
  excludes = [p[1:] for p in patterns if p.startswith('-')]
  gtest_filter = ':'.join(includes)
  if excludes:
    gtest_filter += '-' + ':'.join(excludes)
  return gtest_filter
class GtestTestInstance(test_instance.TestInstance):
  """A single configured gtest suite run in platform mode.

  Resolves the suite's APK and/or host executable, isolate-based data
  dependencies, gtest filters and app data files from command-line |args|.
  """

  def __init__(self, args, isolate_delegate, error_func):
    super(GtestTestInstance, self).__init__()
    # TODO(jbudorick): Support multiple test suites.
    if len(args.suite_name) > 1:
      raise ValueError('Platform mode currently supports only 1 gtest suite')
    self._exe_dist_dir = None
    self._extract_test_list_from_filter = args.extract_test_list_from_filter
    self._shard_timeout = args.shard_timeout
    self._store_tombstones = args.store_tombstones
    self._suite = args.suite_name[0]

    # GYP:
    if args.executable_dist_dir:
      self._exe_dist_dir = os.path.abspath(args.executable_dist_dir)
    else:
      # TODO(agrieve): Remove auto-detection once recipes pass flag explicitly.
      exe_dist_dir = os.path.join(constants.GetOutDirectory(),
                                  '%s__dist' % self._suite)

      if os.path.exists(exe_dist_dir):
        self._exe_dist_dir = exe_dist_dir

    incremental_part = ''
    if args.test_apk_incremental_install_script:
      incremental_part = '_incremental'

    apk_path = os.path.join(
        constants.GetOutDirectory(), '%s_apk' % self._suite,
        '%s-debug%s.apk' % (self._suite, incremental_part))
    self._test_apk_incremental_install_script = (
        args.test_apk_incremental_install_script)
    if not os.path.exists(apk_path):
      self._apk_helper = None
    else:
      self._apk_helper = apk_helper.ApkHelper(apk_path)
      self._extras = {
        _EXTRA_NATIVE_TEST_ACTIVITY: self._apk_helper.GetActivityName(),
      }
      if self._suite in RUN_IN_SUB_THREAD_TEST_SUITES:
        self._extras[_EXTRA_RUN_IN_SUB_THREAD] = 1
      if self._suite in BROWSER_TEST_SUITES:
        self._extras[_EXTRA_SHARD_SIZE_LIMIT] = 1
        self._extras[EXTRA_SHARD_NANO_TIMEOUT] = int(1e9 * self._shard_timeout)
        # Browser tests get a 10x longer overall shard timeout.
        self._shard_timeout = 10 * self._shard_timeout

    if not self._apk_helper and not self._exe_dist_dir:
      error_func('Could not find apk or executable for %s' % self._suite)

    self._data_deps = []
    if args.test_filter:
      self._gtest_filter = args.test_filter
    elif args.test_filter_file:
      with open(args.test_filter_file, 'r') as f:
        self._gtest_filter = ConvertTestFilterFileIntoGTestFilterArgument(f)
    else:
      self._gtest_filter = None

    if (args.isolate_file_path and
        not isolator.IsIsolateEmpty(args.isolate_file_path)):
      self._isolate_abs_path = os.path.abspath(args.isolate_file_path)
      self._isolate_delegate = isolate_delegate
      self._isolated_abs_path = os.path.join(
          constants.GetOutDirectory(), '%s.isolated' % self._suite)
    else:
      logging.warning('%s isolate file provided. No data deps will be pushed.',
                      'Empty' if args.isolate_file_path else 'No')
      self._isolate_delegate = None

    if args.app_data_files:
      self._app_data_files = args.app_data_files
      if args.app_data_file_dir:
        self._app_data_file_dir = args.app_data_file_dir
      else:
        self._app_data_file_dir = tempfile.mkdtemp()
        logging.critical('Saving app files to %s', self._app_data_file_dir)
    else:
      self._app_data_files = None
      self._app_data_file_dir = None

    self._test_arguments = args.test_arguments

    # TODO(jbudorick): Remove this once it's deployed.
    self._enable_xml_result_parsing = args.enable_xml_result_parsing

  # Simple read-only accessors over the state resolved in __init__.
  # Those delegating to |_apk_helper| return None when no APK was found.

  @property
  def activity(self):
    return self._apk_helper and self._apk_helper.GetActivityName()

  @property
  def apk(self):
    return self._apk_helper and self._apk_helper.path

  @property
  def apk_helper(self):
    return self._apk_helper

  @property
  def app_file_dir(self):
    return self._app_data_file_dir

  @property
  def app_files(self):
    return self._app_data_files

  @property
  def enable_xml_result_parsing(self):
    return self._enable_xml_result_parsing

  @property
  def exe_dist_dir(self):
    return self._exe_dist_dir

  @property
  def extras(self):
    return self._extras

  @property
  def gtest_filter(self):
    return self._gtest_filter

  @property
  def package(self):
    return self._apk_helper and self._apk_helper.GetPackageName()

  @property
  def permissions(self):
    return self._apk_helper and self._apk_helper.GetPermissions()

  @property
  def runner(self):
    return self._apk_helper and self._apk_helper.GetInstrumentationName()

  @property
  def shard_timeout(self):
    return self._shard_timeout

  @property
  def store_tombstones(self):
    return self._store_tombstones

  @property
  def suite(self):
    return self._suite

  @property
  def test_apk_incremental_install_script(self):
    return self._test_apk_incremental_install_script

  @property
  def test_arguments(self):
    return self._test_arguments

  @property
  def extract_test_list_from_filter(self):
    return self._extract_test_list_from_filter

  #override
  def TestType(self):
    return 'gtest'

  #override
  def SetUp(self):
    """Map data dependencies via isolate."""
    if self._isolate_delegate:
      self._isolate_delegate.Remap(
          self._isolate_abs_path, self._isolated_abs_path)
      self._isolate_delegate.PurgeExcluded(_DEPS_EXCLUSION_LIST)
      self._isolate_delegate.MoveOutputDeps()
      # None device path: client decides where to push the deps dir.
      dest_dir = None
      self._data_deps.extend([
          (self._isolate_delegate.isolate_deps_dir, dest_dir)])

  def GetDataDependencies(self):
    """Returns the test suite's data dependencies.

    Returns:
      A list of (host_path, device_path) tuples to push. If device_path is
      None, the client is responsible for determining where to push the file.
    """
    return self._data_deps

  def FilterTests(self, test_list, disabled_prefixes=None):
    """Filters |test_list| based on prefixes and, if present, a filter string.

    Args:
      test_list: The list of tests to filter.
      disabled_prefixes: A list of test prefixes to filter. Defaults to
        DISABLED_, FLAKY_, FAILS_, PRE_, and MANUAL_
    Returns:
      A filtered list of tests to run.
    """
    gtest_filter_strings = [
        self._GenerateDisabledFilterString(disabled_prefixes)]
    if self._gtest_filter:
      gtest_filter_strings.append(self._gtest_filter)

    filtered_test_list = test_list
    for gtest_filter_string in gtest_filter_strings:
      logging.debug('Filtering tests using: %s', gtest_filter_string)
      filtered_test_list = unittest_util.FilterTestNames(
          filtered_test_list, gtest_filter_string)
    return filtered_test_list

  def _GenerateDisabledFilterString(self, disabled_prefixes):
    """Builds a '*-<exclusions>' gtest filter from prefixes and, when
    present, the per-suite '<suite>_disabled' filter file."""
    disabled_filter_items = []

    if disabled_prefixes is None:
      disabled_prefixes = ['DISABLED_', 'FLAKY_', 'FAILS_', 'PRE_', 'MANUAL_']
    # Exclude both 'Prefix*' (test case) and '*.Prefix*' (test name) forms.
    disabled_filter_items += ['%s*' % dp for dp in disabled_prefixes]
    disabled_filter_items += ['*.%s*' % dp for dp in disabled_prefixes]

    disabled_tests_file_path = os.path.join(
        host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'gtest',
        'filter', '%s_disabled' % self._suite)
    if disabled_tests_file_path and os.path.exists(disabled_tests_file_path):
      with open(disabled_tests_file_path) as disabled_tests_file:
        disabled_filter_items += [
            '%s' % l for l in (line.strip() for line in disabled_tests_file)
            if l and not l.startswith('#')]

    return '*-%s' % ':'.join(disabled_filter_items)

  #override
  def TearDown(self):
    """Clear the mappings created by SetUp."""
    if self._isolate_delegate:
      self._isolate_delegate.Clear()
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/pylib/gtest/gtest_test_instance.py | Python | gpl-3.0 | 14,910 |
# Map of face-card characters to their numeric ranks.
l = {'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}

def card_ranks(cards):
    "Return a list of the ranks, sorted with higher first."
    numeric = [l[rank] if rank in l else int(rank) for rank, _suit in cards]
    return sorted(numeric, reverse=True)

print(card_ranks(['AC', '3D', '4S', 'KH']))  # should output [14, 13, 4, 3]
# This file is part of the Waymarked Trails Map Project
# Copyright (C) 2015 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Configuration for the MTB map.
"""
from db.configs import *
from os.path import join as os_join
from config.defaults import MEDIA_ROOT
MAPTYPE = 'routes'
ROUTEDB = RouteDBConfig()
ROUTEDB.schema = 'mtb'
ROUTEDB.relation_subset = """
tags ? 'route' and tags->'type' IN ('route', 'superroute')
AND 'mtb' = any(regexp_split_to_array(tags->'route', ';'))
AND NOT (tags ? 'state' AND tags->'state' = 'proposed')"""
ROUTES = RouteTableConfig()
ROUTES.network_map = { 'icn': 0,'ncn': 10, 'rcn': 20, 'lcn': 30 }
ROUTES.symbols = ( 'SwissMobile',
'TextSymbol',
'JelRef',
'ColorBox')
DEFSTYLE = RouteStyleTableConfig()
GUIDEPOSTS = GuidePostConfig()
GUIDEPOSTS.subtype = 'mtb'
GUIDEPOSTS.require_subtype = True
SYMBOLS = ShieldConfiguration()
SYMBOLS.symbol_outdir = os_join(MEDIA_ROOT, 'symbols/mtb')
SYMBOLS.swiss_mobil_bgcolor = (0.88, 0.83, 0.32)
SYMBOLS.swiss_mobil_networks = ('rcn', 'ncn')
| hholzgra/waymarked-trails-site | maps/mtb.py | Python | gpl-3.0 | 1,738 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jiri "NoxArt" Petruzelka
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# @author Jiri "NoxArt" Petruzelka | petruzelka@noxart.cz | @NoxArt
# @copyright (c) 2012 Jiri "NoxArt" Petruzelka
# @link https://github.com/NoxArt/SublimeText2-FTPSync
# Doc comment syntax inspired by http://stackoverflow.com/a/487203/387503
# ==== Libraries ===========================================================================
# Sublime Text 2 API: see http://www.sublimetext.com/docs/2/api_reference.html
# Sublime Text 3 API: see http://www.sublimetext.com/docs/3/api_reference.html
import sublime
import sublime_plugin
# Python's built-in libraries
import copy
import hashlib
import os
import re
import shutil
import sys
import threading
import traceback
import webbrowser
from time import sleep
# FTPSync libraries
if sys.version < '3':
import lib2.simplejson as json
from ftpsynccommon import Types
from ftpsyncwrapper import CreateConnection, TargetAlreadyExists
from ftpsyncprogress import Progress
from ftpsyncfiles import getFolders, findFile, getFiles, formatTimestamp, gatherMetafiles, replace, addLinks, fileToMetafile
from ftpsyncworker import Worker
from ftpsyncfilewatcher import FileWatcher
# exceptions
from ftpsyncexceptions import FileNotFoundException
else:
import FTPSync.lib3.simplejson as json
from FTPSync.ftpsynccommon import Types
from FTPSync.ftpsyncwrapper import CreateConnection, TargetAlreadyExists
from FTPSync.ftpsyncprogress import Progress
from FTPSync.ftpsyncfiles import getFolders, findFile, getFiles, formatTimestamp, gatherMetafiles, replace, addLinks, fileToMetafile
from FTPSync.ftpsyncworker import Worker
from FTPSync.ftpsyncfilewatcher import FileWatcher
# exceptions
from FTPSync.ftpsyncexceptions import FileNotFoundException
# ==== Initialization and optimization =====================================================
# Absolute path of the directory containing this plugin file.
__dir__ = os.path.dirname(os.path.realpath(__file__))

# set to True once plugin_loaded() has finished initializing settings
isLoaded = False

# print debug messages to console?
isDebug = True

# print overly informative messages?
isDebugVerbose = True

# default config for a project
projectDefaults = {}
# indexes of projectDefaults entries whose value is a dict (filled in plugin_loaded)
nested = []
index = 0

# global config key - for specifying global config in settings file
globalConfigKey = '__global'

# global ignore regexp pattern (string), loaded from settings
ignore = False

# time format settings
timeFormat = ""

# delay before check of right opened file is performed, cancelled if closed in the meantime
downloadOnOpenDelay = 0

# core and browse option snapshots built by plugin_loaded()
coreConfig = {}
browseConfig = {}

# name of a file to be detected in the project
configName = 'ftpsync.settings'

# name of a file that is a default sheet for new configs for projects
connectionDefaultsFilename = 'ftpsync.default-settings'

# timeout for a Sublime status bar messages [ms]
messageTimeout = 250

# comment removing regexp
removeLineComment = re.compile('//.*', re.I)

# deprecated names (old name <=> replacement; see updateConfig)
deprecatedNames = {
    "check_time": "overwrite_newer_prevention"
}

# connection cache pool - all connections
connections = {}
# connections currently marked as {in use}
usingConnections = []

# root check cache
rootCheckCache = {}

# individual folder config cache, file => config path
configs = {}

# scheduled delayed uploads, file_path => action id
scheduledUploads = {}

# limit of workers
workerLimit = 0

# debug workers?
debugWorkers = False

# debug json?
debugJson = False

# overwrite cancelled
overwriteCancelled = []

# last navigation state used by the remote browser
navigateLast = {
    'config_file': None,
    'connection_name': None,
    'path': None
}

# browse display flags (overwritten from settings in plugin_loaded)
displayDetails = False
displayPermissions = False
displayTimestampFormat = False

# last folder
re_thisFolder = re.compile("/([^/]*?)/?$", re.I)
re_parentFolder = re.compile("/([^/]*?)/[^/]*?/?$", re.I)

# watch pre-scan
preScan = {}

# temporarily remembered passwords
#
# { settings_filepath => { connection_name => password }, ... }
passwords = {}

# Overriding config for on-the-fly modifications
overridingConfig = {}
def isString(var):
    """Return True when {var} is exactly a string type for this interpreter.

    Python 3: str or bytes; Python 2: str or unicode. Exact type check,
    subclasses are deliberately not accepted.
    """
    if sys.version[0] == '3':
        return type(var) in (str, bytes)
    return type(var) in (str, unicode)
def plugin_loaded():
    """Load FTPSync.sublime-settings and initialize module-level state.

    Called automatically by ST3 once the API is ready; invoked manually on
    ST2 via the version check after this function.
    """
    global browseConfig
    global coreConfig
    global debugJson
    global debugWorkers
    global displayDetails
    global displayPermissions
    global displayTimestampFormat
    global downloadOnOpenDelay
    global ignore
    global index
    global isDebug
    global isDebugVerbose
    global isLoaded
    global nested
    global projectDefaults
    global re_ignore
    global settings
    global systemNotifications
    global timeFormat
    global workerLimit

    # global config
    settings = sublime.load_settings('FTPSync.sublime-settings')

    # test settings
    if settings.get('project_defaults') is None:
        print ("="*86)
        print ("FTPSync > Error loading settings ... please restart Sublime Text after installation")
        print ("="*86)

    # print debug messages to console?
    isDebug = settings.get('debug')

    # print overly informative messages?
    isDebugVerbose = settings.get('debug_verbose')

    # default config for a project
    projectDefaults = settings.get('project_defaults')

    # remember which default entries hold nested dicts
    # NOTE(review): presumably consumed by the config-merging code elsewhere
    # in this file — confirm.
    index = 0
    for item in projectDefaults.items():
        if type(item[1]) is dict:
            nested.append(index)
        index += 1

    # global ignore pattern
    ignore = settings.get('ignore')

    # time format settings
    timeFormat = settings.get('time_format')

    # delay before check of right opened file is performed, cancelled if closed in the meantime
    downloadOnOpenDelay = settings.get('download_on_open_delay')

    # system notifications
    systemNotifications = settings.get('system_notifications')

    # compiled global ignore pattern
    if isString(ignore):
        re_ignore = re.compile(ignore)
    else:
        re_ignore = None

    # loaded project's config will be merged with this global one
    coreConfig = {
        'ignore': ignore,
        'debug_verbose': settings.get('debug_verbose'),
        'ftp_retry_limit': settings.get('ftp_retry_limit'),
        'ftp_retry_delay': settings.get('ftp_retry_delay'),
        'connection_timeout': settings.get('connection_timeout'),
        'ascii_extensions': settings.get('ascii_extensions'),
        'binary_extensions': settings.get('binary_extensions')
    }

    # options controlling the remote-browsing UI
    browseConfig = {
        'browse_display_details': settings.get('browse_display_details'),
        'browse_open_on_download': settings.get('browse_open_on_download'),
        'browse_display_permission': settings.get('browse_display_permission'),
        'browse_timestamp_format': settings.get('browse_timestamp_format'),
        'browse_folder_prefix': settings.get('browse_folder_prefix'),
        'browse_folder_suffix': settings.get('browse_folder_suffix'),
        'browse_file_prefix': settings.get('browse_file_prefix'),
        'browse_file_suffix': settings.get('browse_file_suffix'),
        'browse_up': settings.get('browse_up'),
        'browse_action_prefix': settings.get('browse_action_prefix')
    }

    # limit of workers
    workerLimit = settings.get('max_threads')

    # debug workers?
    debugWorkers = settings.get('debug_threads')

    # debug json?
    debugJson = settings.get('debug_json')

    # browsing
    displayDetails = settings.get('browse_display_details')
    displayPermissions = settings.get('browse_display_permission')
    displayTimestampFormat = settings.get('browse_timestamp_format')

    isLoaded = True
    if isDebug:
        print ('FTPSync > plugin async loaded')
# Sublime Text 2 (build < 3000) never calls plugin_loaded() automatically,
# so run the initialization immediately at import time.
if int(sublime.version()) < 3000:
    plugin_loaded()
# ==== Generic =============================================================================
# Returns file with syntax for settings file
def getConfigSyntax():
    """Return the syntax definition applied to FTPSync settings views."""
    syntax_file = 'Packages/FTPSync/Settings.tmLanguage'
    return syntax_file
# Returns if Sublime has currently active View
#
# ST3 on no opened view returns a View with empty file_name (wtf)
#
# @return boolean
def hasActiveView():
    """Return whether Sublime currently has a usable active view.

    ST3 can report an active View whose file_name() is empty when no file
    is open, so both the view and its file name must be present.
    """
    window = sublime.active_window()
    if window is None:
        return False

    view = window.active_view()
    return view is not None and view.file_name() is not None
# Dumps the exception to console
def handleException(exception):
    """Dump the active exception's traceback to stdout, framed by rules."""
    rule = '-' * 60
    print ("FTPSync > Exception in user code:")
    print (rule)
    traceback.print_exc(file=sys.stdout)
    print (rule)
# Safer print of exception message
def stringifyException(exception):
    """Return the exception's message text as a plain string."""
    return "%s" % (exception,)
# Checks whether cerain package exists
def packageExists(packageName):
    """Return whether a package of the given name is installed in Sublime."""
    package_path = os.path.join(sublime.packages_path(), packageName)
    return os.path.exists(package_path)
def decode(string):
    """Return {string} as text, UTF-8-decoding it when it is a byte string.

    Fix: the original probed hasattr('x', 'decode') — i.e. the literal 'x',
    not the argument — so on Python 3 a bytes value was returned undecoded.
    Probing the argument itself handles both interpreters correctly.
    """
    if hasattr(string, 'decode') and callable(getattr(string, 'decode')):
        return string.decode('utf-8')
    return string
# ==== Messaging ===========================================================================
# Shows a message into Sublime's status bar
#
# @type text: string
# @param text: message to status bar
def statusMessage(text):
    """Show {text} in Sublime's status bar."""
    sublime.status_message(text)
# Schedules a single message to be logged/shown
#
# @type text: string
# @param text: message to status bar
#
# @global messageTimeout
def dumpMessage(text):
    """Schedule {text} for the status bar after {messageTimeout} ms."""
    sublime.set_timeout(lambda: statusMessage(text), messageTimeout)
# Prints a special message to console and optionally to status bar
#
# @type text: string
# @param text: message to status bar
# @type name: string|None
# @param name: comma-separated list of connections or other auxiliary info
# @type onlyVerbose: boolean
# @param onlyVerbose: print only if config has debug_verbose enabled
# @type status: boolean
# @param status: show in status bar as well = true
#
# @global isDebug
# @global isDebugVerbose
def printMessage(text, name=None, onlyVerbose=False, status=False):
    """Print an 'FTPSync [name] > text' line, honouring the debug switches.

    With status=True the message is also queued for the status bar.
    """
    message = "FTPSync"

    if name is not None:
        message += " [" + name + "]"

    message += " > "
    message += text

    if isDebug and (onlyVerbose is False or isDebugVerbose is True):
        # NOTE(review): on Python 3 printing the encoded value shows b'...';
        # presumably a Python 2 leftover — confirm before changing.
        print (message.encode('utf-8'))

    if status:
        dumpMessage(message)
# Issues a system notification for certian event
#
# @type text: string
# @param text: notification message
def systemNotify(text):
    """Issue a desktop notification for {text}, best-effort per platform.

    Fixes: the except branch called nonexistent {handleExceptions} (NameError
    whenever notification failed), and the Linux check compared against
    'linux2' which never matches Python 3's sys.platform value 'linux'.
    """
    try:
        import subprocess

        text = "FTPSync > " + text

        if sys.platform == "darwin":
            # Run Growl notification
            cmd = '/usr/local/bin/growlnotify -a "Sublime Text 2" -t "FTPSync message" -m "'+text+'"'
            subprocess.call(cmd,shell=True)
        elif sys.platform.startswith("linux"):
            # 'linux2' on Python 2, 'linux' on Python 3
            subprocess.call('/usr/bin/notify-send "Sublime Text 2" "'+text+'"',shell=True)
        elif sys.platform == "win32":
            # Find the notification platform for windows if there is one
            pass
    except Exception as e:
        printMessage("Notification failed")
        handleException(e)
# Creates a process message with progress bar (to be used in status bar)
#
# @type stored: list<string>
# @param stored: usually list of connection names
# @type progress: Progress
# @type action: string
# @type action: action that the message reports about ("uploaded", "downloaded"...)
# @type basename: string
# @param basename: name of a file connected with the action
#
# @return string message
def getProgressMessage(stored, progress, action, basename = None):
    """Compose the status-bar text for a transfer, with an optional
    20-slot progress bar and the affected file's basename."""
    parts = ["FTPSync [remotes: " + ",".join(stored) + "] "]

    if progress is not None:
        filled = int(progress.getPercent())
        # '=' for completed slots, '--' for each remaining slot
        bar = "=" * filled + "--" * (20 - filled)
        parts.append(" [" + bar + " " + str(progress.current) + "/" + str(progress.getTotal()) + "] ")

    parts.append("> " + action + " ")

    if basename is not None:
        parts.append(" {" + basename + "}")

    return "".join(parts)
# ==== Config =============================================================================
# Alters override config
#
# @type config_dir_name: string
# @param config_dir_name: path to a folder of a config
# @type property: string
# @param property: property to be modified
# @type value: mixed
# @type specificName: string
# @param specificName: use to only modify specific connection's value
#
# @global overrideConfig
def overrideConfig(config_file_path, property, value, specificName=None):
    """Set {property} to {value} in the in-memory override for a config file.

    Applies to every connection in the config, or only {specificName} when
    given. Overrides live in the module-level {overridingConfig} and never
    touch the settings file on disk.
    """
    if config_file_path is None or os.path.exists(config_file_path) is False:
        return

    config = loadConfig(config_file_path)

    if config_file_path not in overridingConfig:
        overridingConfig[config_file_path] = { 'connections': {} }

    for name in config['connections']:
        if specificName and name != specificName:
            continue

        if name not in overridingConfig[config_file_path]['connections']:
            overridingConfig[config_file_path]['connections'][name] = {}

        overridingConfig[config_file_path]['connections'][name][property] = value
# Invalidates all config cache entries belonging to a certain directory
# as long as they're empty or less nested in the filesystem
#
# @type config_dir_name: string
# @param config_dir_name: path to a folder of a config to be invalidated
#
# @global configs
def invalidateConfigCache(config_dir_name):
    """Drop cached config entries affected by a change under {config_dir_name}.

    An entry is stale when its file lies under {config_dir_name} and it is
    either empty or maps to a config at or above {config_dir_name}.

    Fix: {configs} is a dict — the original called configs.remove(...), which
    raises AttributeError, and it also mutated the dict while iterating it.
    Collect the stale keys first, then delete them.
    """
    stale_keys = [
        file_path for file_path in configs
        if file_path.startswith(config_dir_name)
        and (configs[file_path] is None or config_dir_name.startswith(configs[file_path]))
    ]
    for file_path in stale_keys:
        del configs[file_path]
# Finds a config file in given folders
#
# @type folders: list<string>
# @param folders: list of paths to folders to filter
#
# @return list<string> of file paths
#
# @global configName
def findConfigFile(folders):
    """Locate the {configName} settings file within {folders} via findFile()."""
    return findFile(folders, configName)
# Returns first found config file from folders
#
# @type folders: list<string>
# @param folders: list of paths to folders to search in
#
# @return config filepath
def guessConfigFile(folders):
    """Return the first config found for {folders}, searching each folder
    itself and then its whole subtree; None when nothing matches."""
    for base in folders:
        found = getConfigFile(base)
        if found is not None:
            return found

        for walk_entry in os.walk(base):
            found = getConfigFile(walk_entry[0])
            if found is not None:
                return found

    return None
# Returns configuration file for a given file
#
# @type file_path: string
# @param file_path: file_path to the file for which we try to find a config
#
# @return file path to the config file or None
#
# @global configs
def getConfigFile(file_path):
    """Return the path of the ftpsync.settings file governing {file_path}.

    Results are cached in the module-level {configs} dict; a cached entry is
    revalidated (file still exists and is non-empty) before being returned.
    Returns None when no config can be found.
    """
    cacheKey = file_path
    if isString(cacheKey) is False:
        cacheKey = cacheKey.decode('utf-8')

    # try cached
    try:
        if configs[cacheKey] and os.path.exists(configs[cacheKey]) and os.path.getsize(configs[cacheKey]) > 0:
            printMessage("Loading config: cache hit (key: " + cacheKey + ")")
            return configs[cacheKey]
        else:
            # stale or empty cache entry: force a fresh lookup below
            raise KeyError
    # cache miss
    except KeyError:
        try:
            folders = getFolders(file_path)

            if folders is None or len(folders) == 0:
                return None

            configFolder = findConfigFile(folders)

            if configFolder is None:
                printMessage("Found no config for {" + cacheKey + "}", None, True)
                return None

            config = os.path.join(configFolder, configName)
            configs[cacheKey] = config
            return config
        except AttributeError:
            # NOTE(review): presumably guards odd file_path values that break
            # getFolders() — confirm which call can raise here.
            return None
# Returns hash of file_path
#
# @type file_path: string
# @param file_path: file path to the file of which we want the hash
#
# @return hash of filepath
def getFilepathHash(file_path):
    """Return the hex MD5 digest of {file_path} (used as a stable key)."""
    digest = hashlib.md5(file_path.encode('utf-8'))
    return digest.hexdigest()
# Returns path of file from its config file
#
# @type file_path: string
# @param file_path: file path to the file of which we want the hash
#
# @return string file path from settings root
def getRootPath(file_path, prefix = ''):
    """Return {file_path} relative to its config file's folder, using
    forward slashes, optionally prepended with {prefix}."""
    config_dir = os.path.dirname(getConfigFile(file_path))
    relative = os.path.relpath(file_path, config_dir)
    return prefix + relative.replace('\\', '/')
# Returns a file path associated with view
#
# @type file_path: string
# @param file_path: file path to the file of which we want the hash
#
# @return string file path
def getFileName(view):
    """Return the filesystem path of the file shown in the given view."""
    return view.file_name()
# Gathers all entries from selected paths
#
# @type file_path: list<string>
# @param file_path: list of file/folder paths
#
# @return list of file/folder paths
def gatherFiles(paths):
    """Expand a mix of file and folder paths into [path, config] pairs.

    Files are added directly; folders are walked recursively, collecting
    each contained file, each empty subfolder, and — when the folder itself
    contains no files at all — the folder itself.
    """
    syncFiles = []
    fileNames = []

    for target in paths:

        if os.path.isfile(target):
            if target not in fileNames:
                fileNames.append(target)
                syncFiles.append([target, getConfigFile(target)])
        elif os.path.isdir(target):
            empty = True

            for root, dirs, files in os.walk(target):

                for file_path in files:
                    empty = False
                    # NOTE(review): membership is tested on the bare name but
                    # {target} is what gets appended — the dedup bookkeeping
                    # looks inconsistent; confirm intent before changing.
                    if file_path not in fileNames:
                        fileNames.append(target)
                        syncFiles.append([os.path.join(root, file_path), getConfigFile(os.path.join(root, file_path))])

                for folder in dirs:
                    path = os.path.join(root, folder)

                    # only empty subfolders are recorded explicitly
                    if not os.listdir(path) and path not in fileNames:
                        fileNames.append(path)
                        syncFiles.append([path, getConfigFile(path)])

            if empty is True:
                syncFiles.append([target, getConfigFile(target)])

    return syncFiles
# Returns hash of configuration contents
#
# @type config: dict
#
# @return string
#
# @link http://stackoverflow.com/a/8714242/387503
def getObjectHash(o):
if isinstance(o, set) or isinstance(o, tuple) or isinstance(o, list):
return tuple([getObjectHash(e) for e in o])
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = getObjectHash(v)
return hash(tuple(frozenset(new_o.items())))
# Updates deprecated config to newer version
#
# @type config: dict
#
# @return dict (config)
#
# @global deprecatedNames
def updateConfig(config):
    """Mirror deprecated option names and their replacements in {config}.

    For each (old, new) pair in {deprecatedNames}: when the new name is
    present its value is copied to the old name, otherwise an existing old
    name is copied to the new one — so both keys end up populated.
    Mutates and returns {config}.
    """
    for old_name in deprecatedNames:
        new_name = deprecatedNames[old_name]

        if new_name in config:
            config[old_name] = config[new_name]
        elif old_name in config:
            config[new_name] = config[old_name]

    return config
# Verifies contents of a given config object
#
# Checks that it's an object with all needed keys of a proper type
# Does not check semantic validity of the content
#
# Should be used on configs merged with the defaults
#
# @type config: dict
# @param config: config dict
#
# @return string verification fail reason or a boolean
def verifyConfig(config):
    """Verify the structure and value types of a merged config dict.

    Checks that every required key exists and has an acceptable type; does
    not check semantic validity of the values. Should be used on configs
    already merged with the defaults.

    Returns True on success, otherwise a string describing the failure.

    Fix: the original referenced the Python-2-only name {long} directly, so
    on Python 3 an invalid integer entry raised NameError instead of
    returning the diagnostic message.
    """
    if type(config) is not dict:
        return "Config is not a {dict} type"

    keys = ["username", "password", "private_key", "private_key_pass", "path", "encoding", "tls", "use_tempfile", "upload_on_save", "port", "timeout", "ignore", "check_time", "download_on_open", "upload_delay", "after_save_watch", "time_offset", "set_remote_lastmodified", "default_folder_permissions", "default_local_permissions", "always_sync_local_permissions"]
    # NOTE(review): 'passive' is type-checked below but absent from {keys},
    # so a config missing it raises KeyError — preserved as-is.
    for key in keys:
        if key not in config:
            return "Config is missing a {" + key + "} key"

    # {long} only exists on Python 2; build the accepted integer types once.
    if sys.version < '3':
        integer_types = (int, long)
    else:
        integer_types = (int,)

    # entries that may be null or a string
    for key in ("username", "password", "private_key", "private_key_pass", "ignore"):
        if config[key] is not None and isString(config[key]) is False:
            return "Config entry '" + key + "' must be null or string, " + str(type(config[key])) + " given"

    if isString(config['path']) is False:
        return "Config entry 'path' must be a string, " + str(type(config['path'])) + " given"
    if config['encoding'] is not None and isString(config['encoding']) is False:
        return "Config entry 'encoding' must be a string, " + str(type(config['encoding'])) + " given"

    # strictly boolean entries (exact type, matching the original checks)
    for key in ("tls", "passive", "use_tempfile", "set_remote_lastmodified", "upload_on_save", "check_time", "download_on_open"):
        if type(config[key]) is not bool:
            return "Config entry '" + key + "' must be true or false, " + str(type(config[key])) + " given"

    if type(config['upload_delay']) not in integer_types:
        return "Config entry 'upload_delay' must be integer or long, " + str(type(config['upload_delay'])) + " given"
    if config['after_save_watch'] is not None and type(config['after_save_watch']) is not list:
        return "Config entry 'after_save_watch' must be null or list, " + str(type(config['after_save_watch'])) + " given"
    if type(config['port']) not in integer_types:
        return "Config entry 'port' must be an integer or long, " + str(type(config['port'])) + " given"
    if type(config['timeout']) not in integer_types:
        return "Config entry 'timeout' must be an integer or long, " + str(type(config['timeout'])) + " given"
    if type(config['time_offset']) not in integer_types:
        return "Config entry 'time_offset' must be an integer or long, " + str(type(config['time_offset'])) + " given"

    return True
# Parses JSON-type file with comments stripped out (not part of a proper JSON, see http://json.org/)
#
# @type file_path: string
#
# @return dict|None
#
# @global removeLineComment
def parseJson(file_path):
    """Read and parse a commented-JSON config file, retrying transient errors.

    Makes up to 3 attempts (sleeping 0.1 s between them) because the file may
    be mid-write when read. Returns the parsed dict on success; returns an
    empty dict when every attempt fails or the content is not a non-empty dict.

    @type file_path: string
    @return dict
    """
    attempts = 3
    succeeded = False
    # renamed from `json` — the old local shadowed the json module
    parsed = None
    while attempts > 0:
        attempts = attempts - 1
        try:
            parsed = parseJsonInternal(file_path)
            if debugJson:
                printMessage("Type returned: " + str(type(parsed)))
                printMessage("Is empty: " + str(bool(parsed)))
            # only a non-empty dict counts as a successful read
            succeeded = type(parsed) is dict and bool(parsed) is True
            break
        except Exception as e:
            handleException(e)
            printMessage("Retrying reading config... (remaining " + str(attempts) + ")")
            sleep(0.1)
    if succeeded:
        return parsed
    else:
        printMessage("Failed to read settings from file: " + str(file_path))
        return {}
# Parses JSON-type file with comments stripped out (not part of a proper JSON, see http://json.org/)
#
# @type file_path: string
#
# @return dict
#
# @global removeLineComment
def parseJsonInternal(file_path):
    """Parse *file_path* as JSON after stripping line comments from each line.

    @type file_path: string
    @return dict (or whatever the JSON top level decodes to)
    @raise Exception: when file_path is not a string
    @raise IOError: when the file is missing, empty, or yields empty content

    @global removeLineComment
    """
    if isString(file_path) is False:
        raise Exception("Expected filepath as string, " + str(type(file_path)) + " given")
    if os.path.exists(file_path) is False:
        raise IOError("File " + str(file_path) + " does not exist")
    if os.path.getsize(file_path) == 0:
        raise IOError("File " + str(file_path) + " is empty")
    contents = ""
    # `with` guarantees the handle is closed and avoids the NameError the
    # old try/finally produced when open() itself raised (the `finally`
    # referenced a name that was never bound); also stops shadowing the
    # Python 2 builtin `file`
    with open(file_path, 'r') as handle:
        for line in handle:
            contents += removeLineComment.sub('', line).strip()
    decoder = json.JSONDecoder()
    if debugJson:
        printMessage("Debug JSON:")
        print ("="*86)
        print (contents)
        print ("="*86)
    if len(contents) > 0:
        return decoder.decode(contents)
    else:
        raise IOError('Content read from ' + str(file_path) + ' is empty')
# Asks for passwords if missing in configuration
#
# @type config_file_path: string
# @type config: dict
# @param config: configuration object
# @type callback: callback
# @param callback: what should be done after config is filled
# @type window: Window
# @param window: SublimeText2 API Window object
#
# @global passwords
def addPasswords(config_file_path, config, callback, window):
    """Prompt for any passwords missing from *config*, then run *callback*.

    Asks for exactly one missing password at a time via an input panel; the
    panel's completion handler re-invokes addPasswords, so the function
    recursively walks all connections until none are missing, then calls
    callback().

    @type config_file_path: string
    @type config: dict
    @type callback: callable — run once every password is present
    @type window: sublime Window

    @global passwords
    """
    def setPassword(config, name, password):
        # store the answer both in the live config and in the global
        # `passwords` cache, then re-enter to handle the next missing one
        config['connections'][name]['password'] = password
        if config_file_path not in passwords:
            passwords[config_file_path] = {}
        passwords[config_file_path][name] = password
        addPasswords(config_file_path, config, callback, window)
    def ask(connectionName, host, username):
        # show_input_panel is asynchronous; setPassword continues the chain
        window.show_input_panel('FTPSync > please provide password for: ' + str(host) + ' ~ ' + str(username), "", lambda password: setPassword(config, connectionName, password), None, None)
    if type(config) is dict:
        for name in config['connections']:
            prop = config['connections'][name]
            if prop['password'] is None:
                if config_file_path in passwords and name in passwords[config_file_path] and passwords[config_file_path][name] is not None:
                    # reuse a password cached from an earlier prompt
                    config['connections'][name]['password'] = passwords[config_file_path][name]
                else:
                    # ask for one password and bail out; the async callback
                    # re-invokes addPasswords to continue
                    ask(name, prop['host'], prop['username'])
                    return
    return callback()
# Fills passwords if missing in configuration
#
# @type fileList: [ [ filepath, config_file_path ], ... ]
# @type callback: callback
# @param callback: what should be done after config is filled
# @type window: Window
# @param window: SublimeText2 API Window object
#
# @global passwords
def fillPasswords(fileList, callback, window, index = 0):
    """Ensure passwords exist for all configs backing *fileList*, then callback.

    Collects the distinct config files and processes them one at a time via
    addPasswords (which is asynchronous); *index* counts how many configs were
    already handled across the asynchronous re-entries.

    @type fileList: [ [ filepath, config_file_path ], ... ]
    @type callback: callable — receives fileList when done
    @type window: sublime Window
    @type index: int — number of configs already processed

    @global passwords
    """
    def ask():
        # continuation: once this config is filled, process the next one
        fillPasswords(fileList, callback, window, index + 1)
    i = 0
    length = len(fileList)
    # NOTE(review): index is compared against len(fileList) here but against
    # the (possibly shorter) distinct-config list below — confirm intended
    if index >= length:
        callback(fileList)
        return
    # collect distinct config files, preserving first-seen order
    config_files = []
    for filepath, config_file_path in fileList:
        if config_file_path not in config_files:
            config_files.append(config_file_path)
    for config_file_path in config_files:
        if i < index:
            # skip configs handled in earlier re-entries
            i = i + 1
            continue
        if config_file_path is None:
            continue
        config = loadConfig(config_file_path)
        if config is not None:
            # asynchronous: `ask` re-enters fillPasswords when filled
            addPasswords(config_file_path, config, ask, window)
            return
    callback(fileList)
# Parses given config and adds default values to each connection entry
#
# @type file_path: string
# @param file_path: file path to the file of which we want the hash
#
# @return config dict or None
#
# @global isLoaded
# @global coreConfig
# @global projectDefaults
def loadConfig(file_path):
    """Load, merge and verify the FTPSync config at *file_path*.

    Each connection entry is merged over projectDefaults (user values win),
    the `nested` second-level dicts get their own merge, cached passwords and
    per-file overrides are applied, and the result is wrapped together with
    coreConfig as {core options..., 'connections': {...}}.

    @type file_path: string
    @return dict or None on any failure

    @global isLoaded
    @global coreConfig
    @global projectDefaults
    """
    if isLoaded is False:
        printMessage("FTPSync is not loaded (just installed?), please restart Sublime Text")
        return None
    if isString(file_path) is False:
        printMessage("LoadConfig expects string, " + str(type(file_path)) + " given")
        return None
    if os.path.exists(file_path) is False:
        return None
    # parse config
    try:
        config = parseJson(file_path)
    except Exception as e:
        printMessage("Failed parsing configuration file: {" + file_path + "} (commas problem?) [Exception: " + stringifyException(e) + "]", status=True)
        handleException(e)
        return None
    result = {}
    # merge with defaults and check
    for name in config:
        if type(config[name]) is not dict:
            printMessage("Failed using configuration: contents are not dictionaries but values", status=True)
            return None
        # user entry's items come second, so they win over projectDefaults
        result[name] = dict(list(projectDefaults.items()) + list(config[name].items()))
        result[name]['file_path'] = file_path
        # fix path: ensure a trailing slash on non-root remote paths
        if len(result[name]['path']) > 1 and result[name]['path'][-1] != "/":
            result[name]['path'] = result[name]['path'] + "/"
        # merge nested: the second-level dicts named by `nested` need their
        # own merge, otherwise a partial user dict would drop default keys
        for index in nested:
            list1 = list(list(projectDefaults.items())[index][1].items())
            list2 = list(result[name][list(projectDefaults.items())[index][0]].items())
            result[name][list(projectDefaults.items())[index][0]] = dict(list1 + list2)
        try:
            if result[name]['debug_extras']['dump_config_load'] is True:
                print(result[name])
        except KeyError:
            pass
        # add passwords cached earlier from the input-panel prompts
        if file_path in passwords and name in passwords[file_path] and passwords[file_path][name] is not None:
            result[name]['password'] = passwords[file_path][name]
        result[name] = updateConfig(result[name])
        # verification failures are reported but do not abort the load
        verification_result = verifyConfig(result[name])
        if verification_result is not True:
            printMessage("Invalid configuration loaded: <" + str(verification_result) + ">", status=True)
    # merge with generics
    final = dict(list(coreConfig.items()) + list({"connections": result}.items()))
    # override by overridingConfig (runtime, per-file overrides)
    if file_path in overridingConfig:
        for name in overridingConfig[file_path]['connections']:
            if name in final['connections']:
                for item in overridingConfig[file_path]['connections'][name]:
                    final['connections'][name][item] = overridingConfig[file_path]['connections'][name][item]
    return final
# ==== Remote =============================================================================
# Creates a new connection
#
# @type config: object
# @param config: configuration object
# @type hash: string
# @param hash: connection cache hash (config filepath hash actually)
#
# @return list of descendants of AbstractConnection (ftpsyncwrapper.py)
def makeConnection(config, hash=None, handleExceptions=True):
    """Create, connect, authenticate and position one connection per entry
    of config['connections'].

    @type config: dict — full configuration (see loadConfig)
    @type hash: string — connection cache key, passed to close() bookkeeping
    @type handleExceptions: bool — when False, errors re-raise instead of
        being logged and converted to an empty result

    @return list of connection objects; empty list on any handled failure

    NOTE(review): a failure mid-loop returns [] without closing connections
    already appended to `result` — possible socket leak; confirm intended.
    """
    result = []
    # for each config
    for name in config['connections']:
        properties = config['connections'][name]
        # 1. initialize
        try:
            connection = CreateConnection(config, name)
        except Exception as e:
            if handleExceptions is False:
                raise
            printMessage("Connection initialization failed [Exception: " + stringifyException(e) + "]", name, status=True)
            handleException(e)
            return []
        # 2. connect
        try:
            connection.connect()
        except Exception as e:
            if handleExceptions is False:
                raise
            printMessage("Connection failed [Exception: " + stringifyException(e) + "]", name, status=True)
            connection.close(connections, hash)
            handleException(e)
            return []
        printMessage("Connected to: " + properties['host'] + ":" + str(properties['port']) + " (timeout: " + str(properties['timeout']) + ") (key: " + str(hash) + ")", name)
        # 3. authenticate
        try:
            if connection.authenticate():
                printMessage("Authentication processed", name)
        except Exception as e:
            if handleExceptions is False:
                raise
            printMessage("Authentication failed [Exception: " + stringifyException(e) + "]", name, status=True)
            handleException(e)
            return []
        # 4. login (only when both username and password are configured)
        if properties['username'] is not None and properties['password'] is not None:
            try:
                connection.login()
            except Exception as e:
                printMessage("Login failed [Exception: " + stringifyException(e) + "]", name, status=True)
                handleException(e)
                # drop the cached password so the user is prompted again
                if properties['file_path'] in passwords and name in passwords[properties['file_path']]:
                    passwords[properties['file_path']][name] = None
                if handleExceptions is False:
                    raise
                return []
            pass_present = " (using password: NO)"
            if len(properties['password']) > 0:
                pass_present = " (using password: YES)"
            printMessage("Logged in as: " + properties['username'] + pass_present, name)
        else:
            printMessage("Anonymous connection", name)
        # 5. ensure that root exists (once per host:path, cached globally)
        cacheKey = properties['host'] + ":" + properties['path']
        if cacheKey not in rootCheckCache:
            try:
                connection.ensureRoot()
                rootCheckCache[cacheKey] = True
            except Exception as e:
                if handleExceptions is False:
                    raise
                printMessage("Failed ensure root exists [Exception: " + stringifyException(e) + "]", name)
                handleException(e)
                return []
        # 6. set initial directory, set name, store connection
        try:
            connection.cwd(properties['path'])
        except Exception as e:
            if handleExceptions is False:
                raise
            printMessage("Failed to set path (probably connection failed) [Exception: " + stringifyException(e) + "]", name)
            handleException(e)
            return []
        # 7. add to connections list, skipping duplicates by name
        present = False
        for con in result:
            if con.name == connection.name:
                present = True
        if present is False:
            result.append(connection)
    return result
# Returns connection, connects if needed
#
# @type hash: string
# @param hash: connection cache hash (config filepath hash actually)
# @type config: object
# @param config: configuration object
# @type shared: bool
# @param shared: whether to use shared connection
#
# @return list of descendants of AbstractConnection (ftpsyncwrapper.py)
#
# @global connections
def getConnection(hash, config, shared=True):
    """Return the (possibly cached) connection list for *hash*.

    With shared=False a fresh, caller-owned list is built. Otherwise the
    global `connections` cache is consulted; a missing, stale (config hash
    changed), undersized, or dead cache entry raises KeyError internally,
    which falls through to reconnecting and re-caching.

    @type hash: string — connection cache key
    @type config: dict
    @type shared: bool
    @return list of connection objects

    @global connections
    """
    if shared is False:
        return makeConnection(config, hash)
    # try cache
    try:
        if connections[hash] and len(connections[hash]) > 0:
            printMessage("Connection cache hit (key: " + hash + ")", None, True)
        if type(connections[hash]) is not list or len(connections[hash]) < len(config['connections']):
            raise KeyError
        # has config changed? compare per-connection config hashes
        valid = True
        index = 0
        for name in config['connections']:
            if getObjectHash(connections[hash][index].config) != getObjectHash(config['connections'][name]):
                valid = False
            index += 1
        if valid == False:
            # stale: close everything and rebuild via the KeyError path
            for connection in connections[hash]:
                connection.close(connections, hash)
            raise KeyError
        # is config truly alive
        for connection in connections[hash]:
            if connection.isAlive() is False:
                raise KeyError
        return connections[hash]
    # cache miss
    except KeyError:
        connections[hash] = makeConnection(config, hash)
        # schedule connection timeout: keep postponing while the hash is in
        # usingConnections, close once idle
        def closeThisConnection():
            if hash not in usingConnections:
                closeConnection(hash)
            else:
                sublime.set_timeout(closeThisConnection, config['connection_timeout'] * 1000)
        sublime.set_timeout(closeThisConnection, config['connection_timeout'] * 1000)
        # return all connections
        return connections[hash]
# Close all connections for a given config file
#
# @type hash: string
# @param hash: connection cache hash (config filepath hash actually)
#
# @global connections
def closeConnection(hash):
    """Close and unregister every cached connection stored under *hash*.

    @type hash: string — connection cache key (hash of the config filepath)

    @global connections
    """
    if isString(hash) is False:
        printMessage("Error closing connection: connection hash must be a string, " + str(type(hash)) + " given")
        return
    if hash not in connections:
        return
    try:
        # FIX: iterate a snapshot — connection.close(connections, hash) is
        # handed the registry and key, presumably so it can remove itself
        # from connections[hash]; mutating the list while iterating the
        # live list silently skips every other connection
        for connection in list(connections[hash]):
            connection.close(connections, hash)
            printMessage("Closed", connection.name)
        # drop the registry entry once all connections removed themselves
        if len(connections[hash]) == 0:
            connections.pop(hash)
    except Exception as e:
        printMessage("Error when closing connection (key: " + hash + ") [Exception: " + stringifyException(e) + "]")
        handleException(e)
# Returns a new worker
def createWorker():
    """Build a Worker wired to this module's connection/config factories."""
    worker = Worker(workerLimit, makeConnection, loadConfig)
    if isDebug and debugWorkers:
        worker.enableDebug()
    return worker
# ==== Executive functions ======================================================================
class SyncObject(object):
    """Minimal observable: collects completion callbacks and fires them."""

    def __init__(self):
        # callbacks run by triggerFinish; may contain None placeholders
        self.onFinish = []

    def addOnFinish(self, callback):
        """Register *callback* to run on completion; returns self for chaining."""
        self.onFinish.append(callback)
        return self

    def triggerFinish(self, args):
        """Invoke every registered callback with *args* (None entries skipped)."""
        for handler in self.onFinish:
            if handler is None:
                continue
            handler(args)
# Generic synchronization command
class SyncCommand(SyncObject):
    """Base of every synchronization command.

    Holds the file being acted on, the configuration loaded from
    config_file_path, and the list of connections used to execute the action.
    Subclasses implement execute().
    """
    def __init__(self, file_path, config_file_path):
        SyncObject.__init__(self)
        # Python 3 may hand paths over as bytes
        if sys.version[0] == '3' and type(file_path) is bytes:
            file_path = file_path.decode('utf-8')
        self.running = True
        self.closed = False
        # has exclusive ownership of connection?
        self.ownConnection = False
        self.file_path = file_path
        self.config_file_path = config_file_path
        if isString(config_file_path) is False:
            printMessage("Cancelling " + self.getIdentification() + ": invalid config_file_path given (type: " + str(type(config_file_path)) + ")")
            self.close()
            return
        if os.path.exists(config_file_path) is False:
            printMessage("Cancelling " + self.getIdentification() + ": config_file_path: No such file")
            self.close()
            return
        self.config = loadConfig(config_file_path)
        if file_path is not None:
            # path of the file relative to the config file's folder
            self.basename = os.path.relpath(file_path, os.path.dirname(config_file_path))
        self.config_hash = getFilepathHash(self.config_file_path)
        self.connections = None
        self.worker = None
    def getIdentification(self):
        # e.g. "SyncCommandUpload [/path/to/file]" — used in log messages
        return str(self.__class__.__name__) + " [" + str(self.file_path) + "]"
    def setWorker(self, worker):
        # worker that will queue/execute this command asynchronously
        self.worker = worker
    def setConnection(self, connections):
        # use externally supplied (shared) connections; do not own them
        self.connections = connections
        self.ownConnection = False
    def _createConnection(self):
        # lazily create own, non-shared connections (closed in __del__)
        if self.connections is None:
            self.connections = getConnection(self.config_hash, self.config, False)
            self.ownConnection = True
    def _localizePath(self, config, remote_path):
        """Map *remote_path* back to the corresponding local filesystem path."""
        path = remote_path
        if path.find(config['path']) == 0:
            path = os.path.realpath(os.path.join(os.path.dirname(self.config_file_path), remote_path[len(config['path']):]))
        return path
    def execute(self):
        raise NotImplementedError("Abstract method")
    def close(self):
        # mark as cancelled; subclasses check self.closed before executing
        self.running = False
        self.closed = True
    def _closeConnection(self):
        closeConnection(getFilepathHash(self.config_file_path))
    def whitelistConnections(self, whitelistConnections):
        """Restrict this command to the named connections; returns self."""
        toBeRemoved = []
        for name in self.config['connections']:
            if name not in whitelistConnections:
                toBeRemoved.append(name)
        for name in toBeRemoved:
            self.config['connections'].pop(name)
        return self
    def isRunning(self):
        return self.running
    def __del__(self):
        # release bookkeeping; hasattr guards cover the early-return paths
        # of __init__ where some attributes were never assigned
        self.running = False
        if hasattr(self, 'config_hash') and self.config_hash in usingConnections:
            usingConnections.remove(self.config_hash)
        if hasattr(self, 'ownConnection'):
            if self.ownConnection:
                for connection in self.connections:
                    if isDebug:
                        printMessage("Closing connection")
                    connection.close()
            elif hasattr(self, 'worker') and self.worker is not None:
                self.worker = None
# Transfer-related sychronization command
class SyncCommandTransfer(SyncCommand):
    """Base for upload/download/delete commands.

    Filters the loaded connections by the global ignore pattern, per-connection
    ignore rules, upload_on_save settings and an optional whitelist, and
    provides the shared finish-message reporting.
    """
    def __init__(self, file_path, config_file_path, progress=None, onSave=False, disregardIgnore=False, whitelistConnections=[], forcedSave=False):
        SyncCommand.__init__(self, file_path, config_file_path)
        self.progress = progress
        self.onSave = onSave
        # FIX: was hard-coded `= False`, silently discarding the caller's
        # argument; SyncCommandDownload propagates self.disregardIgnore to
        # the child commands it spawns
        self.disregardIgnore = disregardIgnore
        # global ignore
        if disregardIgnore is False and ignore is not None and re_ignore.search(self.file_path) is not None:
            if self._onPreConnectionRemoved():
                printMessage("File globally ignored: {" + os.path.basename(self.file_path) + "}", onlyVerbose=True)
                self.close()
                return
        toBeRemoved = []
        for name in self.config['connections']:
            # on save: drop connections that do not upload on save
            if self.config['connections'][name]['upload_on_save'] is False and onSave is True and forcedSave is False:
                toBeRemoved.append(name)
                continue
            # ignore: per-connection ignore regex
            if disregardIgnore is False and self.config['connections'][name]['ignore'] is not None and re.search(self.config['connections'][name]['ignore'], self.file_path):
                if self._onPreConnectionRemoved():
                    toBeRemoved.append(name)
                    printMessage("File ignored by rule: {" + self.basename + "}", name, True)
                continue
            # whitelist: when given, only listed connections stay
            if len(whitelistConnections) > 0 and name not in whitelistConnections:
                toBeRemoved.append(name)
                continue
        for name in toBeRemoved:
            self.config['connections'].pop(name)
    # Code that needs to run when a connection is removed (ignored)
    #
    # @return bool: truly remove?
    def _onPreConnectionRemoved(self):
        if self.progress is not None:
            self.progress.progress()
        return True
    # Get connections of this command that were not removed due to config, ignore etc.
    def getConnectionsApplied(self):
        return self.config['connections']
    # Creates a message when transfer is finished and sends it to console / bar / system
    def finishMessage(self, title, stored, wasFinished):
        notify = title + "ing "
        if self.progress is None or self.progress.getTotal() == 1:
            notify += "{" + self.basename + "} "
        else:
            notify += str(self.progress.getTotal()) + " files "
        notify += "finished!"
        if self.progress is not None and self.progress.isFinished() and wasFinished is False:
            dumpMessage(getProgressMessage(stored, self.progress, notify))
        else:
            dumpMessage(getProgressMessage(stored, self.progress, title + "ed ", self.basename))
        # FIX: parenthesized — the old `A and B or C` form notified even when
        # systemNotifications was off, and raised AttributeError calling
        # isFinished() on a None progress
        if systemNotifications and (self.progress is None or (self.progress.isFinished() and wasFinished is False)):
            systemNotify(notify)
# Upload command
class SyncCommandUpload(SyncCommandTransfer):
    """Upload the file to every applicable connection.

    Supports per-connection upload_delay (deferred upload via sublime
    timeout) and after_save_watch (re-uploading files changed while saving).
    """
    def __init__(self, file_path, config_file_path, progress=None, onSave=False, disregardIgnore=False, whitelistConnections=[], forcedSave=False):
        self.delayed = False
        self.skip = False
        SyncCommandTransfer.__init__(self, file_path, config_file_path, progress, onSave, disregardIgnore, whitelistConnections, forcedSave)
        self.watcher = FileWatcher(self.config_file_path, self.config['connections'])
        if os.path.exists(file_path) is False:
            printMessage("Cancelling " + self.getIdentification() + ": file_path: No such file")
            self.close()
            return
    # Code that needs to run when a connection is removed (ignored)
    #
    # @return bool: truly remove?
    def _onPreConnectionRemoved(self):
        SyncCommandTransfer._onPreConnectionRemoved(self)
        # when saving and has afterwatch, don't remove completely, only skip
        # so that we at least upload those changed files
        if self._hasAfterWatch() and self.onSave:
            self.skip = True
            return False
        return True
    # Returns whether any of the config entries has after_save_watch enabled
    # Can't be in FileWatcher due to cycling dependency with config and _onPreConnectionRemoved
    def _hasAfterWatch(self):
        for name in self.config['connections']:
            if self.config['connections'][name]['after_save_watch']:
                return True
        return False
    # Forwards a scan result to the FileWatcher
    # (exact semantics of event/name/data live in FileWatcher — TODO confirm)
    def setScanned(self, event, name, data):
        self.watcher.setScanned(event, name, data)
    # Executes command
    def execute(self):
        """Upload to each connection, honouring upload_delay and afterwatch."""
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            self.close()
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            self.close()
            return
        self._createConnection()
        # afterwatch: snapshot watched folders before the upload
        if self.onSave is True:
            try:
                self.watcher.prepare()
            except Exception as e:
                printMessage("Watching failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", "", False, True)
        usingConnections.append(self.config_hash)
        stored = []
        index = -1
        for name in self.config['connections']:
            index += 1
            try:
                self._createConnection()
                # identification: random token guards against a newer upload
                # of the same file superseding this (possibly delayed) one
                connection = self.connections[index]
                id = os.urandom(32)
                scheduledUploads[self.file_path] = id
                # action
                def action():
                    try:
                        # cancelled: a newer scheduled upload replaced our token
                        if self.file_path not in scheduledUploads or scheduledUploads[self.file_path] != id:
                            return
                        # process (skip=True still reports, set by afterwatch logic)
                        if self.skip is False:
                            connection.put(self.file_path)
                            stored.append(name)
                        if self.skip is False:
                            printMessage("Uploaded {" + self.basename + "}", name)
                        else:
                            printMessage("Ignored {" + self.basename + "}", name)
                        # cleanup
                        scheduledUploads.pop(self.file_path)
                        if self.delayed is True:
                            # afterwatch: upload files changed during the delay
                            for change in self.watcher.getChangedFiles(name):
                                if change.isSameFilepath(self.file_path):
                                    continue
                                change = change.getPath()
                                command = SyncCommandUpload(change, getConfigFile(change), None, False, True, [name])
                                if self.worker is not None:
                                    command.setWorker(self.worker)
                                    self.worker.addCommand(command, self.config_file_path)
                                else:
                                    command.execute()
                            self.delayed = False
                            self.__del__()
                        # no need to handle progress, delay action only happens with single uploads
                        self.triggerFinish(self.file_path)
                    except Exception as e:
                        printMessage("Upload failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                        handleException(e)
                    finally:
                        self.running = False
                # delayed: schedule through the sublime event loop
                if self.onSave is True and self.config['connections'][name]['upload_delay'] > 0:
                    self.delayed = True
                    printMessage("Delaying processing " + self.basename + " by " + str(self.config['connections'][name]['upload_delay']) + " seconds", name, onlyVerbose=True)
                    sublime.set_timeout(action, self.config['connections'][name]['upload_delay'] * 1000)
                else:
                    action()
            except IndexError:
                # fewer live connections than config entries
                continue
            except EOFError:
                printMessage("Connection has been terminated, please retry your action", name, False, True)
                self._closeConnection()
            except Exception as e:
                printMessage("Upload failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                handleException(e)
        if self.progress is not None:
            self.progress.progress()
        if len(stored) > 0:
            self.finishMessage("Upload", stored, True)
    def __del__(self):
        # a delayed upload must outlive this scope; only tear down fully
        # when no delayed action is pending
        if hasattr(self, 'delayed') and self.delayed is False:
            SyncCommand.__del__(self)
        else:
            self.closed = True
            self.running = False
# Download command
class SyncCommandDownload(SyncCommandTransfer):
    """Download the file (or, recursively, a directory) from the remote.

    Directory listings spawn one child SyncCommandDownload per entry; flags:
    isDir (treat path as directory), forced (ignore freshness), skip (do not
    actually transfer this file).
    """
    def __init__(self, file_path, config_file_path, progress=None, onSave=False, disregardIgnore=False, whitelistConnections=[], forcedSave = False):
        SyncCommandTransfer.__init__(self, file_path, config_file_path, progress, onSave, disregardIgnore, whitelistConnections, forcedSave)
        self.isDir = False
        self.forced = False
        self.skip = False
    def setIsDir(self):
        self.isDir = True
        return self
    def setForced(self):
        self.forced = True
        return self
    def setSkip(self):
        self.skip = True
        return self
    def execute(self):
        # NOTE(review): unconditionally forcing here makes setForced()/
        # setSkip() dead (skip is only honoured when not forced) — looks
        # like a leftover; left unchanged to preserve behavior. TODO confirm
        self.forced = True
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            self.close()
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            self.close()
            return
        self._createConnection()
        usingConnections.append(self.config_hash)
        index = -1
        stored = []
        for name in self.config['connections']:
            index += 1
            try:
                if self.isDir or os.path.isdir(self.file_path):
                    # directory: list remote and spawn a child command per entry
                    contents = self.connections[index].list(self.file_path)
                    if type(contents) is not list:
                        printMessage("List returned no entries {0}".format(self.file_path))
                        continue
                    if os.path.exists(self.file_path) is False:
                        os.makedirs(self.file_path)
                    if self.progress:
                        for entry in contents:
                            if entry.isDirectory() is False:
                                self.progress.add([entry.getName()])
                    self.running = False
                    for entry in contents:
                        full_name = os.path.join(self.file_path, entry.getName())
                        command = SyncCommandDownload(full_name, self.config_file_path, progress=self.progress, disregardIgnore=self.disregardIgnore)
                        if self.forced:
                            command.setForced()
                        if entry.isDirectory() is True:
                            command.setIsDir()
                        elif not self.forced and entry.isNewerThan(full_name) is True:
                            # NOTE(review): skipping when the remote is *newer*
                            # looks inverted — confirm against upstream intent
                            command.setSkip()
                        if self.worker is not None:
                            command.setWorker(self.worker)
                            self.worker.addCommand(command, self.config_file_path)
                        else:
                            command.execute()
                else:
                    # single file
                    if not self.skip or self.forced:
                        self.connections[index].get(self.file_path, blockCallback = lambda: dumpMessage(getProgressMessage([name], self.progress, "Downloading", self.basename)))
                        printMessage("Downloaded {" + self.basename + "}", name)
                        self.triggerFinish(self.file_path)
                    else:
                        printMessage("Skipping {" + self.basename + "}", name)
                    stored.append(name)
            except IndexError:
                continue
            # FIX: the old handler did not bind the exception (`except
            # FileNotFoundException:`) yet called handleException(e),
            # raising NameError on an unbound `e`
            except FileNotFoundException as e:
                printMessage("Remote file not found", name, False, True)
                handleException(e)
            except EOFError:
                printMessage("Connection has been terminated, please retry your action", name, False, True)
                self._closeConnection()
            except Exception as e:
                printMessage("Download of {" + self.basename + "} failed [Exception: " + stringifyException(e) + "]", name, False, True)
                handleException(e)
            finally:
                # NOTE(review): `break` in finally ends the loop after the
                # first connection and swallows the `continue`s above —
                # preserved as-is. TODO confirm
                self.running = False
                break
        wasFinished = False
        if self.progress is None or self.progress.isFinished() is False:
            wasFinished = True
        if self.progress is not None and self.isDir is not True:
            self.progress.progress()
        if len(stored) > 0:
            self.finishMessage("Download", stored, wasFinished)
            # reload the view showing the freshly downloaded file
            file_path = self.file_path
            def refresh():
                view = sublime.active_window().active_view()
                if view is not None and view.file_name() == file_path:
                    view.run_command("revert")
            sublime.set_timeout(refresh, 1)
# Rename command
class SyncCommandRename(SyncCommand):
    """Rename the file on every connection, then rename it locally.

    When the target name already exists remotely the user is asked (via a
    quick panel) whether to overwrite or cancel.
    """
    def __init__(self, file_path, config_file_path, new_name):
        # validation runs before SyncCommand.__init__, so self.file_path is
        # not set yet — the old code called self.getIdentification() here
        # and crashed with AttributeError; use the local argument instead
        if os.path.exists(file_path) is False:
            printMessage("Cancelling SyncCommandRename [" + str(file_path) + "]: file_path: No such file")
            self.close()
            return
        if isString(new_name) is False:
            printMessage("Cancelling SyncCommandRename: invalid new_name given (type: " + str(type(new_name)) + ")")
            self.close()
            return
        if len(new_name) == 0:
            printMessage("Cancelling SyncCommandRename: empty new_name given")
            self.close()
            return
        self.new_name = new_name
        self.dirname = os.path.dirname(file_path)
        SyncCommand.__init__(self, file_path, config_file_path)
    def execute(self):
        """Probe for name clashes, then rename remotely and locally."""
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            self.close()
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            self.close()
            return
        self._createConnection()
        usingConnections.append(self.config_hash)
        index = -1
        renamed = []
        exists = []
        remote_new_name = os.path.join( os.path.split(self.file_path)[0], self.new_name)
        # probe which remotes already have a file with the target name
        for name in self.config['connections']:
            index += 1
            check = None
            try:
                check = self.connections[index].list(remote_new_name)
            except FileNotFoundException:
                pass
            if type(check) is list and len(check) > 0:
                exists.append(name)
        def action(forced=False):
            index = -1
            for name in self.config['connections']:
                index += 1
                try:
                    self.connections[index].rename(self.file_path, self.new_name, forced)
                    printMessage("Renamed {" + self.basename + "} -> {" + self.new_name + "}", name)
                    renamed.append(name)
                except IndexError:
                    continue
                except TargetAlreadyExists as e:
                    printMessage(stringifyException(e))
                except EOFError:
                    printMessage("Connection has been terminated, please retry your action", name, False, True)
                    self._closeConnection()
                except Exception as e:
                    # FIX: str.find returns -1 (truthy!) when absent and 0
                    # (falsy) at position 0 — the old truthiness test was
                    # effectively inverted
                    if str(e).find("No such file or directory") != -1:
                        printMessage("Remote file not found", name, False, True)
                        renamed.append(name)
                    else:
                        printMessage("Renaming failed: {" + self.basename + "} -> {" + self.new_name + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                        handleException(e)
            # message
            if len(renamed) > 0:
                # rename file locally only after a remote rename succeeded
                replace(self.file_path, os.path.join(self.dirname, self.new_name))
                self.triggerFinish(self.file_path)
                printMessage("Remotely renamed {" + self.basename + "} -> {" + self.new_name + "}", "remotes: " + ','.join(renamed), status=True)
        if len(exists) == 0:
            action()
        else:
            def sync(index):
                # FIX: `index is 0` relied on CPython int identity; use ==
                if index == 0:
                    printMessage("Renaming: overwriting target")
                    action(True)
                else:
                    printMessage("Renaming: keeping original")
            overwrite = []
            overwrite.append("Overwrite remote file? Already exists in:")
            for remote in exists:
                # FIX: indexed the config with the stale loop variable `name`
                # (the last probed connection) instead of the listed remote
                overwrite.append(remote + " [" + self.config['connections'][remote]['host'] + "]")
            cancel = []
            cancel.append("Cancel renaming")
            for remote in exists:
                cancel.append("")
            sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([ overwrite, cancel ], sync), 1)
# Delete command
class SyncCommandDelete(SyncCommandTransfer):
    """Delete the file/folder on every applicable remote, then locally."""
    def __init__(self, file_path, config_file_path, progress=None, onSave=False, disregardIgnore=False, whitelistConnections=[]):
        # onSave/disregardIgnore are forced off: deletes always apply
        SyncCommandTransfer.__init__(self, file_path, config_file_path, progress, False, False, whitelistConnections)
    def execute(self):
        """Delete on each connection; remove the local copy when any succeeded."""
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        if self.progress is not None:
            self.progress.progress()
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            return
        self._createConnection()
        usingConnections.append(self.config_hash)
        deleted = []
        index = -1
        for name in self.config['connections']:
            index += 1
            try:
                # identification
                connection = self.connections[index]
                # action
                try:
                    # process
                    connection.delete(self.file_path)
                    deleted.append(name)
                    printMessage("Deleted {" + self.basename + "}", name)
                except FileNotFoundException:
                    # already gone remotely: treat as deleted
                    deleted.append(name)
                    printMessage("No remote version of {" + self.basename + "} found", name)
                except Exception as e:
                    printMessage("Delete failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                    handleException(e)
            except IndexError:
                continue
            except FileNotFoundException:
                printMessage("Remote file not found", name, False, True)
                deleted.append(name)
                continue
            except EOFError:
                printMessage("Connection has been terminated, please retry your action", name, False, True)
                self._closeConnection()
            except Exception as e:
                # FIX: str.find returns -1 (truthy!) when the substring is
                # absent and 0 (falsy) at position 0 — compare against -1
                if str(e).find("No such file or directory") != -1:
                    printMessage("Remote file not found", name, False, True)
                    deleted.append(name)
                else:
                    printMessage("Delete failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                    handleException(e)
        if len(deleted) > 0:
            # remove the local copy only after at least one remote delete
            if os.path.exists(self.file_path):
                if os.path.isdir(self.file_path):
                    shutil.rmtree(self.file_path)
                else:
                    os.remove(self.file_path)
            self.triggerFinish(self.file_path)
            dumpMessage(getProgressMessage(deleted, self.progress, "Deleted", self.basename))
# Get metadata command
class SyncCommandGetMetadata(SyncCommand):
    """Fetch remote metadata (listing) of the command's file.

    execute() returns a list of {'connection': name, 'metadata': entry}
    dicts, one per connection on which the file was listed.
    """
    def execute(self):
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            return
        self._createConnection()
        usingConnections.append(self.config_hash)
        index = -1
        results = []
        for name in self.config['connections']:
            index += 1
            try:
                metadata = self.connections[index].list(self.file_path)
                # only a non-empty listing counts; first entry is the file
                if type(metadata) is list and len(metadata) > 0:
                    results.append({
                        'connection': name,
                        'metadata': metadata[0]
                    })
            except IndexError:
                # fewer live connections than config entries
                continue
            except FileNotFoundException:
                # propagated so callers can report "remote file not found"
                raise
            except EOFError:
                printMessage("Connection has been terminated, please retry your action", name, False, True)
                self._closeConnection()
            except Exception as e:
                printMessage("Getting metadata failed: {" + self.basename + "} [Exception: " + stringifyException(e) + "]", name, False, True)
                handleException(e)
        return results
def performRemoteCheck(file_path, window, forced = False, whitelistConnections=[]):
    """Compare local *file_path* with its remote versions and offer actions.

    Fetches remote metadata and, when any remote version differs in size (or
    when *forced*), shows a quick panel letting the user keep the local file,
    download a specific remote version, or upload the local one.

    @type file_path: string
    @type window: sublime Window
    @type forced: bool — offer the panel even when sizes match
    @type whitelistConnections: list — restrict to these connection names
    """
    if isString(file_path) is False:
        return
    if window is None:
        return
    basename = os.path.basename(file_path)
    printMessage("Checking {" + basename + "} if up-to-date", status=True)
    config_file_path = getConfigFile(file_path)
    if config_file_path is None:
        return printMessage("Found no config > for file: " + file_path, status=True)
    config = loadConfig(config_file_path)
    try:
        metadata = SyncCommandGetMetadata(file_path, config_file_path)
        if len(whitelistConnections) > 0:
            metadata.whitelistConnections(whitelistConnections)
        metadata = metadata.execute()
    except FileNotFoundException:
        printMessage("Remote file not found", status=True)
        return
    except Exception as e:
        printMessage("Error when getting metadata: " + stringifyException(e))
        handleException(e)
        metadata = []
    if type(metadata) is not list:
        return printMessage("Invalid metadata response, expected list, got " + str(type(metadata)))
    if len(metadata) == 0:
        return printMessage("No version of {" + basename + "} found on any server", status=True)
    # bucket the remote versions: newer / older / differing at all
    newest = []
    oldest = []
    every = []
    for entry in metadata:
        if forced is False and entry['metadata'].isDifferentSizeThan(file_path) is False:
            continue
        if entry['metadata'].isNewerThan(file_path):
            newest.append(entry)
            every.append(entry)
        else:
            oldest.append(entry)
            if entry['metadata'].isDifferentSizeThan(file_path):
                every.append(entry)
    if len(every) > 0:
        every = metadata
        # FIX: sorted() returns a new list and the old code discarded it,
        # so `every` was never actually sorted before reverse(); sort in place
        every.sort(key=lambda entry: entry['metadata'].getLastModified())
        every.reverse()
        connectionCount = len(every)
        def sync(index):
            # panel layout: [0] keep local, [1..n] download from remote n,
            # [n+1] upload local; -1 (cancel) falls through to no action
            if index == connectionCount + 1:
                return RemoteSyncCall(file_path, getConfigFile(file_path), True).start()
            if index > 0:
                if isDebug:
                    i = 0
                    for entry in every:
                        printMessage("Listing connection " + str(i) + ": " + str(entry['connection']))
                        i += 1
                    printMessage("Index selected: " + str(index - 1))
                return RemoteSyncDownCall(file_path, getConfigFile(file_path), True, whitelistConnections=[every[index - 1]['connection']]).start()
        filesize = os.path.getsize(file_path)
        allItems = []
        items = []
        items.append("Keep current " + os.path.basename(file_path))
        items.append("Size: " + str(round(float(os.path.getsize(file_path)) / 1024, 3)) + " kB")
        items.append("Last modified: " + formatTimestamp(os.path.getmtime(file_path)))
        allItems.append(items)
        index = 1
        for item in every:
            item_filesize = item['metadata'].getFilesize()
            if item_filesize == filesize:
                item_filesize = "same size"
            else:
                if item_filesize > filesize:
                    item_filesize = str(round(item_filesize / 1024, 3)) + " kB ~ larger"
                else:
                    item_filesize = str(round(item_filesize / 1024, 3)) + " kB ~ smaller"
            time = str(item['metadata'].getLastModifiedFormatted(timeFormat))
            if item in newest:
                time += " ~ newer"
            else:
                time += " ~ older"
            items = []
            items.append("Get from " + item['connection'] + " [" + config['connections'][ item['connection'] ]['host'] + "]")
            items.append("Size: " + item_filesize)
            items.append("Last modified: " + time)
            allItems.append(items)
            index += 1
        upload = []
        upload.append("Upload file " + os.path.basename(file_path))
        upload.append("Size: " + str(round(float(os.path.getsize(file_path)) / 1024, 3)) + " kB")
        upload.append("Last modified: " + formatTimestamp(os.path.getmtime(file_path)))
        allItems.append(upload)
        sublime.set_timeout(lambda: window.show_quick_panel(allItems, sync), 1)
    else:
        printMessage("All remote versions of {" + basename + "} are of same size and older", status=True)
class ShowInfo(SyncCommand):
    """Displays server info (type, user, encryption, feature list) for every
    configured connection in a quick panel."""

    def execute(self, window):
        if self.closed:
            printMessage("Cancelling " + self.getIdentification() + ": command closed")
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            return
        self._createConnection()
        usingConnections.append(self.config_hash)
        index = -1
        results = []
        for name in self.config['connections']:
            index += 1
            try:
                info = self.connections[index].getInfo()
                if type(info) is dict:
                    results.append(info)
            except IndexError:
                continue
            except Exception as e:
                printMessage("Getting info failed [Exception: " + stringifyException(e) + "]", name, False, True)
                handleException(e)
        # Quick panel rows must all have the same number of lines, so each
        # entry is padded up to the longest feature list
        maxFeats = 0
        for item in results:
            if len(item['features']) > maxFeats:
                maxFeats = len(item['features'])
        output = []
        for item in results:
            if item['config']['tls']:
                encryption = "enabled"
            else:
                encryption = "disabled"
            if item['canEncrypt'] is None:
                encryption += " [unconfirmed]"
            elif item['canEncrypt'] is False:
                encryption += " [NOT SUPPORTED]"
            else:
                encryption += " [SUPPORTED]"
            entry = []
            entry.append(item['name'] + " [" + item['config']['host'] + "]")
            entry.append("Type: " + item['type'])
            entry.append("User: " + item['config']['username'])
            entry.append("Encryption: " + encryption)
            if "MFMT" in item['features']:
                entry.append("Last modified: SUPPORTED")
            else:
                entry.append("Last modified: NOT SUPPORTED")
            entry.append("")
            entry.append("Server features:")
            feats = 0
            for feat in item['features']:
                entry.append(feat)
                feats = feats + 1
            if feats < maxFeats:
                # BUGFIX: range(1, d) added one padding row too few, leaving
                # entries of unequal length
                for i in range(maxFeats - feats):
                    entry.append("")
            output.append(entry)
        sublime.set_timeout(lambda: window.show_quick_panel(output, None), 1)
class SyncNavigator(SyncCommand):
    """Remote file browser: lists connections, folders and files in quick
    panels and offers per-entry actions (download, upload, remove, rename,
    chmod, details, copy path)."""

    def __init__(self, file_path, config_file_path, connection_name=None, path=None, remotePath=None):
        self.configConnection = None
        self.configName = None
        # metas aligned 1:1 with the quick panel rows built by listFiles()
        self.files = []
        self.defaultPath = path
        self.defaultRemotePath = remotePath
        SyncCommand.__init__(self, None, config_file_path)
        if connection_name is not None:
            self.selectConnection(connection_name)

    def execute(self):
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        if len(self.config['connections']) == 0:
            printMessage("Cancelling " + self.getIdentification() + ": zero connections apply")
            return
        usingConnections.append(self.config_hash)
        # With several connections and none preselected, ask the user first;
        # the listing then continues in handleConnectionSelection
        if len(self.config['connections']) > 1 and self.configConnection is None:
            self.listConnections()
        else:
            if self.configConnection is None:
                for name in self.config['connections']:
                    self.selectConnection(name)
            if self.defaultPath:
                self.listFiles(self.defaultPath)
            elif self.defaultRemotePath:
                self.listFiles(self.defaultRemotePath, True)

    def listConnections(self):
        """Shows a quick panel with all connections and continues browsing in the chosen one."""
        connections = []
        names = []
        for name in self.config['connections']:
            connection = self.config['connections'][name]
            connections.append([ name, "Host: " + connection['host'], "Path: " + connection['path'] ])
            names.append(name)

        def handleConnectionSelection(index):
            if index == -1:
                return
            self.selectConnection(names[index])
            self.listFiles(self.defaultPath)

        sublime.set_timeout(lambda: sublime.active_window().show_quick_panel(connections, handleConnectionSelection), 1)

    def selectConnection(self, name):
        """Restricts self.config to the single named connection."""
        self.configConnection = self.config['connections'][name]
        self.configName = name
        self.config['connections'] = {}
        self.config['connections'][name] = self.configConnection

    def updateNavigateLast(self, path):
        # remember position so "navigate to last location" can resume here
        navigateLast['config_file'] = self.config_file_path
        navigateLast['connection_name'] = self.configName
        navigateLast['path'] = path

    def listFiles(self, path=None, forced=False):
        """Lists the contents of `path` in a quick panel.

        `forced` treats `path` as a remote path even if it matches the default.
        """
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        self._createConnection()
        connection = self.connections[0]
        # figure out path
        remote = True
        if path is None or path == self.defaultPath or self.defaultPath is None:
            remote = False
        if path is None:
            path = os.path.dirname(self.config_file_path)
        if forced:
            remote = True
        self.updateNavigateLast(path)
        mappedPath = connection.getMappedPath(path, remote)
        # get contents
        contents = connection.list(path, remote, True)
        contents = addLinks(contents, mappedPath)
        contents = sorted(contents, key=lambda entry: (entry.getName() != "..", entry.isDirectory() is False, entry.getName().lower()))
        content = []
        # add header
        header = self.getHeader(mappedPath)
        content.extend(header)
        # map the header rows onto the current-folder meta so panel indexes
        # stay aligned with self.files
        for meta in contents:
            if meta.getName() == '.':
                for i in range(len(header)):
                    self.files.append(meta)
                break
        # add contents
        for meta in contents:
            if displayDetails:
                entry = self.getDetailedEntry(meta, path, connection)
            else:
                entry = self.getSimpleEntry(meta, path, connection)
            if entry:
                content.append(entry)
                self.files.append(meta)
        if len(contents) == 0:
            printMessage("No files found in remote path for local {" + str(path) + "}", status=True)

        def handleMetaSelection(index):
            if index == -1:
                return
            meta = self.files[index]
            self.files = []
            if meta.isDirectory():
                # header rows map to the current folder's actions
                if index >= len(header):
                    self.listFiles(meta.getFilepath())
                else:
                    self.listFolderActions(meta)
            else:
                self.listFileActions(meta)

        sublime.set_timeout(lambda: sublime.active_window().show_quick_panel(content, handleMetaSelection), 1)

    def getHeader(self, mappedPath):
        """Returns the quick panel header rows for the current folder."""
        currentFolder = self.configConnection['host'] + ':' + self.connections[0].getNormpath(mappedPath)
        if currentFolder == '..':
            currentFolder = '/'
        if displayDetails:
            return self.getDetailedCurrentFolder(currentFolder)
        else:
            return self.getSimpleCurrentFolder(currentFolder)

    def getSimpleCurrentFolder(self, currentFolder):
        return [[ currentFolder + "/" ], [decode("• Folder actions")]]

    def getDetailedCurrentFolder(self, currentFolder):
        entry = [currentFolder, decode("• Current folder"), decode("• Click to list actions")]
        if displayPermissions:
            entry.append("")
        return [entry]

    def getSimpleEntry(self, meta, path, connection):
        """One-line panel row for `meta`; None for entries that are hidden."""
        entry = []
        if meta.isDirectory():
            if meta.getName() == '.' or (meta.getName() == '..' and connection.getNormpath(path) == '/'):
                return None
            if meta.getName() == '..':
                entry.append(browseConfig['browse_up'])
            else:
                entry.append("\t" + browseConfig['browse_folder_prefix'] + decode(meta.getName()) + browseConfig['browse_folder_suffix'])
        else:
            entry.append("\t" + browseConfig['browse_file_prefix'] + decode(meta.getName()) + browseConfig['browse_file_suffix'])
        return entry

    def getDetailedEntry(self, meta, path, connection):
        """Multi-line panel row for `meta`; None for entries that are hidden."""
        entry = []
        if meta.isDirectory():
            if meta.getName() == '.' or (meta.getName() == '..' and connection.getNormpath(path) == '/'):
                return None
            entry.append("[" + decode(meta.getName()) + "]")
            entry.append("Directory")
        else:
            entry.append(decode(meta.getName()))
            entry.append("Size: " + meta.getHumanFilesize())
        if displayDetails:
            entry.append("Last modified: " + meta.getLastModifiedFormatted(displayTimestampFormat))
        if displayPermissions:
            entry.append("Permissions: " + meta.getPermissions())
        entry.append("Path: " + meta.getPath())
        return entry

    def listFolderActions(self, meta, action=None):
        """Shows (or directly executes, when `action` is given) folder actions for `meta`."""
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        self._createConnection()
        connection = self.connections[0]
        path = meta.getPath()
        localFile = connection.getLocalPath( str(meta.getPath() + '/' + meta.getName()).replace('/.',''), os.path.dirname(self.config_file_path))
        exists = 0
        name = meta.getName()
        if name == '.':
            split = re_thisFolder.search(meta.getPath())
            if split is not None:
                name = split.group(1)
        if name == '..':
            split = re_parentFolder.search(meta.getPath())
            if split is not None:
                name = split.group(1)
            else:
                # parent of root: just list it instead of offering actions
                name = '/'
                self.listFiles(meta.getPath() + '/' + meta.getName())
                return
        prefix = browseConfig['browse_action_prefix']
        actions = []
        actions.append("Folder: " + connection.getNormpath(path) + "/")
        actions.append(prefix + "Download folder")
        if os.path.exists(localFile):
            actions.append(prefix + "Upload folder")
            exists = 1
        actions.append(prefix + "Remove folder")
        actions.append(prefix + "Rename folder")
        actions.append(prefix + "Change permissions")
        actions.append(prefix + "Show details")
        actions.append(prefix + "Copy path")

        def handleAction(index):
            # menu layout: 0 open, 1 download, [2 upload], then fixed actions
            # shifted by `exists`
            if index == -1:
                return
            if index == 0:
                self.listFiles(meta.getPath() + '/' + meta.getName())
                return
            if index == 1:
                call = RemoteSyncDownCall([[localFile, getConfigFile(localFile)]], None, False, True)
                call.setIsDir()
                call.start()
                return
            if exists and index == 2:
                RemoteSyncCall(gatherFiles([localFile]), None, False, True).start()
                return
            if index == 2 + exists:
                RemoteSyncDelete(localFile).start()
                return
            if index == 3 + exists:
                try:
                    sublime.active_window().run_command("ftp_sync_rename", { "paths": [ localFile ] })
                except Exception as e:
                    handleException(e)
                return
            if index == 4 + exists:
                def permissions(newPermissions):
                    self._createConnection()
                    connection = self.connections[0]
                    connection.cwd(meta.getPath())
                    connection.chmod(meta.getName(), newPermissions)
                    printMessage("Properties of " + meta.getName() + " changed to " + newPermissions, status=True)
                sublime.active_window().show_input_panel('Change permissions to:', self.configConnection['default_folder_permissions'], permissions, None, None)
            if index == 5 + exists:
                info = []
                info.append(meta.getName())
                info.append("[Directory]")
                info.append("Path: " + str(meta.getPath())[len(self.configConnection['path']):] + '/' + meta.getName().replace('/./', '/'))
                info.append("Permissions: " + meta.getPermissions() + " (" + meta.getPermissionsNumeric() + ")")
                if connection.hasTrueLastModified():
                    info.append("Last Modified: " + meta.getLastModifiedFormatted())
                else:
                    info.append("Last upload time: " + meta.getLastModifiedFormatted())
                info.append("")
                if os.path.exists(localFile):
                    info.append("[Has local version]")
                    info.append("Local last modified: " + formatTimestamp(os.path.getmtime(localFile), displayTimestampFormat))
                    if sublime.platform() == 'windows':
                        info.append("Local created: " + formatTimestamp(os.path.getctime(localFile), displayTimestampFormat))
                else:
                    info.append("[No local version]")
                sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([info], None), 1)
                return
            if index == 6 + exists:
                get_path = meta.getPath()
                sublime.set_clipboard(get_path)
                return

        if action is None:
            sublime.set_timeout(lambda: sublime.active_window().show_quick_panel(actions, handleAction), 1)
        else:
            handleAction(action)

    def listFileActions(self, meta, action=None):
        """Shows (or directly executes, when `action` is given) file actions for `meta`."""
        if self.closed is True:
            printMessage("Cancelling " + self.getIdentification() + ": command is closed")
            return
        path = meta.getPath() + '/' + meta.getName()
        self._createConnection()
        connection = self.connections[0]
        localFile = connection.getLocalPath(meta.getPath() + '/' + meta.getName(), os.path.dirname(self.config_file_path))
        exists = 0
        hasSidebar = packageExists("SideBarEnhancements")
        prefix = browseConfig['browse_action_prefix']
        actions = []
        actions.append("File: " + connection.getNormpath(path) + "/")
        actions.append(prefix + "Back")
        actions.append(prefix + "Download file")
        if os.path.exists(localFile):
            actions.append(prefix + "Upload file")
            exists = 1
        actions.append(prefix + "Remove file")
        actions.append(prefix + "Rename file")
        actions.append(prefix + "Change permissions")
        actions.append(prefix + "Show details")
        actions.append(prefix + "Copy path")

        def handleAction(index):
            # Menu layout: 0 header, 1 back, 2 download, [3 upload], then the
            # fixed actions shifted by `exists`.
            # NOTE(review): the later indexes also add int(hasSidebar), which
            # assumes an extra SideBarEnhancements entry that is not present
            # in `actions` above — confirm against upstream before relying on
            # the chmod/details/copy positions when SideBarEnhancements is absent.
            if index == -1:
                return
            if index == 0 or index == 1:
                self.listFiles(meta.getPath())
                return
            if index == 2:
                def dopen(args):
                    try:
                        sublime.set_timeout(lambda: sublime.active_window().open_file(args), 1)
                    except Exception as e:
                        handleException(e)
                call = RemoteSyncDownCall(localFile, getConfigFile(self.config_file_path), False, True)
                if settings.get('browse_open_on_download'):
                    call.onFinish(dopen)
                call.start()
                return
            # BUGFIX: upload was tested at index 2, which the download branch
            # above already consumed (dead code); the upload entry sits at 3
            if exists and index == 3:
                RemoteSyncCall(gatherFiles([localFile]), None, True, True).start()
                return
            if index == 3 + exists:
                RemoteSyncDelete(localFile).start()
                return
            if index == 4 + exists:
                try:
                    sublime.active_window().run_command("ftp_sync_rename", { "paths": [ localFile ] })
                except Exception as e:
                    handleException(e)
                return
            if index == 4 + exists + int(hasSidebar):
                def permissions(newPermissions):
                    self._createConnection()
                    connection = self.connections[0]
                    connection.cwd(meta.getPath())
                    connection.chmod(meta.getName(), newPermissions)
                    printMessage("Properties of " + meta.getName() + " changed to " + newPermissions, status=True)
                sublime.active_window().show_input_panel('Change permissions to:', self.configConnection['default_folder_permissions'], permissions, None, None)
                return
            if index == 5 + exists + int(hasSidebar):
                info = []
                info.append(meta.getName())
                info.append("[File]")
                info.append("Path: " + str(meta.getPath())[len(self.configConnection['path']):] + '/' + meta.getName().replace('/./', '/'))
                info.append("Size: " + str(round(meta.getFilesize()/1024,3)) + " kB")
                info.append("Permissions: " + meta.getPermissions() + " (" + meta.getPermissionsNumeric() + ")")
                if connection.hasTrueLastModified():
                    info.append("Last Modified: " + meta.getLastModifiedFormatted())
                else:
                    info.append("Last upload time: " + meta.getLastModifiedFormatted())
                info.append("")
                if os.path.exists(localFile):
                    info.append("[Has local version]")
                    info.append("Local size: " + str(round(float(os.path.getsize(localFile)) / 1024, 3)) + " kB")
                    info.append("Local last modified: " + formatTimestamp(os.path.getmtime(localFile), displayTimestampFormat))
                    if sublime.platform() == 'windows':
                        info.append("Local created: " + formatTimestamp(os.path.getctime(localFile), displayTimestampFormat))
                else:
                    info.append("[No local version]")
                def back(index):
                    self.listFileActions(meta, action)
                sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([info], back), 1)
                return
            if index == 6 + exists + int(hasSidebar):
                sublime.set_clipboard(meta.getFilepath())
                return

        if action is None:
            sublime.set_timeout(lambda: sublime.active_window().show_quick_panel(actions, handleAction), 1)
        else:
            # BUGFIX: previously passed the `actions` list instead of the
            # selected `action` index
            handleAction(action)
# ==== Watching ===========================================================================
# File paths queued for an up-to-date check once their view finishes loading
# (appended in RemoteSync.on_load, removed in on_close / rename commands)
checksScheduled = []
# pre_save x post_save upload prevention: file paths whose upload is held
# back between the two save hooks (filled by RemotePresave)
preventUpload = []
# File watching
class RemoteSync(sublime_plugin.EventListener):
    """Sublime event hooks: overwrite prevention before save, upload after
    save, plugin reload on settings save, up-to-date check on load and
    connection cleanup on close."""

    def on_pre_save(self, view):
        file_path = getFileName(view)
        config_file_path = getConfigFile(file_path)
        if config_file_path is None:
            return

        def pre_save(_files):
            window = view.window()
            if window is None:
                window = sublime.active_window()
            RemotePresave(file_path, fileToMetafile(file_path), config_file_path, _files, view, window, self.manual_on_post_save).start()

        fillPasswords([[ None, config_file_path ]], pre_save, sublime.active_window())

    def on_post_save(self, view):
        fileName = os.path.basename(view.file_name())
        # reload the plugin when its own settings file is saved
        if fileName == 'FTPSync.sublime-settings':
            sublime.set_timeout(plugin_loaded, 1000)

    def manual_on_post_save(self, file_path):
        """Upload entry point invoked by RemotePresave once the save is allowed."""
        config_file_path = getConfigFile(file_path)
        command = RemoteSyncCall(file_path, config_file_path, True)
        if config_file_path in preScan and preScan[config_file_path] is not None:
            command.setPreScan(preScan[config_file_path])
        command.start()

    def on_close(self, view):
        file_path = getFileName(view)
        if file_path is None:
            return
        config_file_path = getConfigFile(file_path)
        if file_path in checksScheduled:
            checksScheduled.remove(file_path)
        if config_file_path is not None:
            closeConnection(getFilepathHash(config_file_path))

    # When a file is loaded and at least 1 connection has download_on_open enabled
    # it will check those enabled if the remote version is newer and offers the newest to download
    def on_load(self, view):
        file_path = getFileName(view)
        if file_path and os.path.basename(file_path) == configName:
            view.set_syntax_file(getConfigSyntax())
        # robustness: views without a file name cannot be checked (and would
        # crash re_ignore.search below)
        if file_path is None:
            return
        if ignore is not None and re_ignore is not None and re_ignore.search(file_path) is not None:
            return
        # BUGFIX: checksScheduled stores file paths, but membership was tested
        # with the view object, so every load queued a duplicate entry
        if file_path not in checksScheduled:
            checksScheduled.append(file_path)

            def check():
                # still scheduled (view wasn't closed in the meantime)?
                if file_path in checksScheduled:
                    def execute(files):
                        whitelistConnections = []
                        config_file_path = getConfigFile(file_path)
                        if config_file_path is None:
                            return printMessage("Config not found for: " + file_path)
                        config = loadConfig(config_file_path)
                        for name in config['connections']:
                            if config['connections'][name]['download_on_open'] is True:
                                whitelistConnections.append(name)
                        if len(whitelistConnections):
                            RemoteSyncCheck(file_path, view.window(), forced=False, whitelistConnections=whitelistConnections).start()
                    fillPasswords([[ file_path, getConfigFile(file_path) ]], execute, sublime.active_window())

            sublime.set_timeout(check, downloadOnOpenDelay)
# ==== Threading ===========================================================================
def fillProgress(progress, entry):
    """Recursively register every file path found in `entry` with `progress`.

    `entry` may be a [path, config] pair (only the path is counted) or a
    nested list of such pairs; empty entries are ignored.
    """
    if not len(entry):
        return
    # a [path, config] pair contributes its path only
    if isString(entry[0]):
        entry = entry[0]
    if type(entry) is not list:
        progress.add([entry])
        return
    for member in entry:
        fillProgress(progress, member)
class RemoteThread(threading.Thread):
    """Base class for FTPSync worker threads.

    Carries an optional pre-save scan snapshot, a connection whitelist and
    an optional on-finish callback, and copies them onto sync commands.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.preScan = None
        # BUGFIX: this attribute was misspelled `_whitelistConnetions`, so the
        # hasattr() check in addWhitelistConnections never passed and the
        # whitelist was silently dropped
        self._whitelistConnections = []
        self._onFinish = None

    def setPreScan(self, preScan):
        self.preScan = preScan

    def addPreScan(self, command):
        """Copies the 'before' scan snapshot into `command`."""
        if self.preScan is not None:
            for name in self.preScan:
                command.setScanned('before', name, self.preScan[name])

    def setWhitelistConnections(self, whitelistConnections):
        self._whitelistConnections = whitelistConnections

    def addWhitelistConnections(self, command):
        """Applies the stored whitelist to `command`; returns `command` for chaining."""
        if hasattr(self, '_whitelistConnections'):
            command.whitelistConnections(self._whitelistConnections)
        return command

    def onFinish(self, callback):
        self._onFinish = callback

    def getOnFinish(self):
        if hasattr(self, '_onFinish'):
            return self._onFinish
        else:
            return None
class RemotePresave(RemoteThread):
    """Pre-save worker: records after-save-watch snapshots and performs
    overwrite prevention (warns when a remote version is newer than the
    local file about to be uploaded).

    `callback` is invoked with the file path when the upload may proceed.
    """

    def __init__(self, file_path, metafile, config_file_path, _files, view, window, callback):
        self.file_path = file_path
        self.metafile = metafile
        self.config_file_path = config_file_path
        self._files = _files
        self.view = view
        self.window = window
        self.callback = callback
        RemoteThread.__init__(self)

    def run(self):
        file_path = self.file_path
        config_file_path = self.config_file_path
        preScan[config_file_path] = {}
        root = os.path.dirname(config_file_path)
        config = loadConfig(config_file_path)
        blacklistConnections = []
        for connection in config['connections']:
            properties = config['connections'][connection]
            if properties['upload_on_save'] is False:
                blacklistConnections.append(connection)
            # snapshot watched folders so after-save changes can be detected
            watch = properties['after_save_watch']
            if type(watch) is list and len(watch) > 0 and properties['upload_delay'] > 0:
                preScan[config_file_path][connection] = {}
                for folder, filepattern in watch:
                    files = gatherMetafiles(filepattern, os.path.join(root, folder))
                    preScan[config_file_path][connection].update(files.items())
                if properties['debug_extras']['after_save_watch']:
                    printMessage("<debug> dumping pre-scan")
                    print ("COUNT: " + str(len(preScan[config_file_path][connection])))
                    for change in preScan[config_file_path][connection]:
                        print ("Path: " + preScan[config_file_path][connection][change].getPath() + " | Name: " + preScan[config_file_path][connection][change].getName())
        # nothing to upload anywhere => no overwrite check needed
        if len(blacklistConnections) == len(config['connections']):
            return
        try:
            metadata = SyncCommandGetMetadata(file_path, config_file_path).execute()
        except Exception as e:
            # BUGFIX: str.find() returns -1 (truthy) when absent and 0 (falsy)
            # when the message starts with the needle; use `in` instead
            if 'No such file' in str(e):
                printMessage("No version of {" + os.path.basename(file_path) + "} found on any server", status=True)
            else:
                printMessage("Error when getting metadata: " + stringifyException(e))
                handleException(e)
            metadata = []
        newest = None       # index into metadata of the most recent newer entry
        newestTime = None   # its last-modified timestamp
        newer = []
        index = 0
        for entry in metadata:
            properties = config['connections'][entry['connection']]
            if 'debug_overwrite_prevention' in properties['debug_extras'] and properties['debug_extras']['debug_overwrite_prevention']:
                printMessage("<debug> dumping overwrite prevention")
                print ("File [local]: " + str(file_path))
                print ("File [remote]: " + str(entry['metadata'].getPath()))
                print ("Enabled: " + str(properties['check_time'] is True))
                print ("Not in blacklist: " + str(entry['connection'] not in blacklistConnections))
                print ("Is remote newer: " + str(entry['metadata'].isNewerThan(self.metafile)))
                print ("Is size different: " + str(entry['metadata'].isDifferentSizeThan(file_path)))
                print ("In overwrite cancelled: " + str(file_path in overwriteCancelled))
                print ("+ [remote] last modified: " + str(entry['metadata'].getLastModified()))
                print ("+ [local] last modified: " + str(self.metafile.getLastModified()))
                print ("+ [remote] size: " + str(entry['metadata'].getFilesize()))
                print ("+ [local] size: " + str(os.path.getsize(file_path)))
            if (entry['connection'] not in blacklistConnections and properties['check_time'] is True and entry['metadata'].isNewerThan(self.metafile) and entry['metadata'].isDifferentSizeThan(file_path)) or file_path in overwriteCancelled:
                newer.append(entry['connection'])
                # BUGFIX: previously compared the stored *index* against a
                # timestamp; track the timestamp separately instead
                entryTime = entry['metadata'].getLastModified()
                if newestTime is None or entryTime > newestTime:
                    newestTime = entryTime
                    newest = index
            index += 1
        if len(newer) > 0:
            preventUpload.append(file_path)

            def sync(index):
                # == instead of `is` (int identity is an implementation detail)
                if index == 0:
                    printMessage("Overwrite prevention: overwriting")
                    if file_path in overwriteCancelled:
                        overwriteCancelled.remove(file_path)
                    self.callback(self.file_path)
                else:
                    printMessage("Overwrite prevention: cancelled upload")
                    if file_path not in overwriteCancelled:
                        overwriteCancelled.append(file_path)

            yes = []
            yes.append("Yes, overwrite newer")
            yes.append("Last modified: " + metadata[newest]['metadata'].getLastModifiedFormatted())
            for entry in newer:
                yes.append(entry + " [" + config['connections'][entry]['host'] + "]")
            no = []
            no.append("No")
            no.append("Cancel uploading")
            # pad so both panel entries have equal row counts
            for entry in newer:
                no.append("")
            sublime.set_timeout(lambda: self.window.show_quick_panel([ yes, no ], sync), 1)
        else:
            self.callback(self.file_path)
class RemoteSyncCall(RemoteThread):
    """Thread uploading one file or a batch of [file_path, config] pairs."""

    def __init__(self, file_path, config, onSave, disregardIgnore=False, whitelistConnections=None, forcedSave=False):
        self.file_path = file_path
        self.config = config
        self.onSave = onSave
        self.forcedSave = forcedSave
        self.disregardIgnore = disregardIgnore
        # avoid sharing a mutable default list between instances
        self.whitelistConnections = [] if whitelistConnections is None else whitelistConnections
        RemoteThread.__init__(self)

    def run(self):
        target = self.file_path
        # single path without config: nothing to do
        if isString(target) and self.config is None:
            return False
        elif isString(target):
            command = SyncCommandUpload(target, self.config, onSave=self.onSave, disregardIgnore=self.disregardIgnore, whitelistConnections=self.whitelistConnections, forcedSave=self.forcedSave)
            command.addOnFinish(self.getOnFinish())
            self.addWhitelistConnections(command)
            self.addPreScan(command)
            command.execute()
        elif type(target) is list and len(target) > 0:
            progress = Progress()
            fillProgress(progress, target)
            queue = createWorker()
            for file_path, config in target:
                command = SyncCommandUpload(file_path, config, progress=progress, onSave=self.onSave, disregardIgnore=self.disregardIgnore, whitelistConnections=self.whitelistConnections, forcedSave=self.forcedSave)
                command.addOnFinish(self.getOnFinish())
                self.addWhitelistConnections(command)
                self.addPreScan(command)
                if workerLimit > 1:
                    queue.addCommand(command, config)
                else:
                    command.execute()
class RemoteSyncDownCall(RemoteThread):
    """Thread downloading one file or a batch of [file_path, config] pairs."""

    def __init__(self, file_path, config, disregardIgnore=False, forced=False, whitelistConnections=None):
        self.file_path = file_path
        self.config = config
        self.disregardIgnore = disregardIgnore
        self.forced = forced
        # BUGFIX: the passed whitelist was discarded (always reset to []), so
        # e.g. performRemoteCheck's per-connection download hit every server
        self.whitelistConnections = [] if whitelistConnections is None else whitelistConnections
        self.isDir = False
        RemoteThread.__init__(self)

    def setIsDir(self):
        self.isDir = True

    def run(self):
        target = self.file_path
        if isString(target) and self.config is None:
            return False
        elif isString(target):
            queue = createWorker()
            command = SyncCommandDownload(target, self.config, disregardIgnore=self.disregardIgnore, whitelistConnections=self.whitelistConnections)
            command.addOnFinish(self.getOnFinish())
            self.addWhitelistConnections(command)
            if self.isDir:
                command.setIsDir()
            if self.forced:
                command.setForced()
            if workerLimit > 1:
                command.setWorker(queue)
                queue.addCommand(command, self.config)
            else:
                command.execute()
        elif type(target) is list and len(target) > 0:
            total = len(target)
            progress = Progress(total)
            queue = createWorker()
            for file_path, config in target:
                if os.path.isfile(file_path):
                    progress.add([file_path])
                command = SyncCommandDownload(file_path, config, disregardIgnore=self.disregardIgnore, progress=progress, whitelistConnections=self.whitelistConnections)
                command.addOnFinish(self.getOnFinish())
                self.addWhitelistConnections(command)
                if self.isDir:
                    command.setIsDir()
                if self.forced:
                    command.setForced()
                if workerLimit > 1:
                    command.setWorker(queue)
                    queue.addCommand(command, config)
                else:
                    command.execute()
class RemoteSyncRename(RemoteThread):
    """Thread renaming a file on every applicable connection."""

    def __init__(self, file_path, config, new_name):
        RemoteThread.__init__(self)
        self.file_path = file_path
        self.new_name = new_name
        self.config = config

    def run(self):
        command = SyncCommandRename(self.file_path, self.config, self.new_name)
        command.addOnFinish(self.getOnFinish())
        self.addWhitelistConnections(command)
        command.execute()
class RemoteSyncCheck(RemoteThread):
    """Thread running performRemoteCheck for a single file."""

    def __init__(self, file_path, window, forced=False, whitelistConnections=[]):
        RemoteThread.__init__(self)
        self.file_path = file_path
        self.window = window
        self.forced = forced
        self.whitelistConnections = whitelistConnections

    def run(self):
        performRemoteCheck(self.file_path, self.window, self.forced, self.whitelistConnections)
class RemoteSyncDelete(RemoteThread):
    """Thread deleting one or more files locally and remotely, after a
    quick-panel confirmation."""

    def __init__(self, file_paths):
        # a single path or a list of paths; normalized to a list in run()
        self.file_path = file_paths
        RemoteThread.__init__(self)

    def run(self):
        target = self.file_path
        if isString(target):
            self.file_path = [ target ]

        def sync(index):
            # BUGFIX: `index is 0` relied on CPython small-int identity; use ==
            if index == 0:
                self.delete()
            else:
                printMessage("Deleting: cancelled")

        yes = []
        yes.append("Yes, delete the selected items [also remotely]")
        for entry in self.file_path:
            yes.append( getRootPath(entry, '/') )
        no = []
        no.append("No")
        no.append("Cancel deletion")
        # pad so both panel entries have equal row counts
        for entry in self.file_path:
            if entry == self.file_path[0]:
                continue
            no.append("")
        sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([yes, no], sync), 1)

    def delete(self):
        """Performs the confirmed deletion of every stored path."""
        target = self.file_path
        progress = Progress()
        fillProgress(progress, target)
        for file_path in target:
            command = SyncCommandDelete(file_path, getConfigFile(file_path), progress=progress, onSave=False, disregardIgnore=False, whitelistConnections=[])
            self.addWhitelistConnections(command)
            command.addOnFinish(self.getOnFinish())
            command.execute()
class RemoteNavigator(RemoteThread):
    """Thread launching a SyncNavigator browsing session, optionally
    resuming at the last visited location."""

    def __init__(self, config, last=False):
        RemoteThread.__init__(self)
        self.config = config
        self.last = last
        self.command = None

    def setCommand(self, command):
        self.command = command

    def run(self):
        command = self.command
        if command is None:
            if self.last is True:
                command = SyncNavigator(None, navigateLast['config_file'], navigateLast['connection_name'], None, navigateLast['path'])
            else:
                command = SyncNavigator(None, self.config)
        self.addWhitelistConnections(command)
        command.execute()
# ==== Commands ===========================================================================
# Sets up a config file in a directory
class FtpSyncNewSettings(sublime_plugin.WindowCommand):
    """Creates a default ftpsync.settings file in the selected folder(s),
    falling back to the active view's folder or an input panel."""

    def run(self, edit=None, dirs=None):
        # avoid a mutable default argument
        if dirs is None:
            dirs = []
        if len(dirs) == 0:
            if sublime.active_window() is not None and sublime.active_window().active_view() is not None:
                dirs = [os.path.dirname(sublime.active_window().active_view().file_name())]
            elif sublime.active_window() is not None:
                sublime.active_window().show_input_panel('Enter setup path', '', self.create, None, None)
                return
            else:
                printMessage("Cannot setup file - no folder path selected and no active view (opened file) detected")
                return
        self.create(dirs)

    def create(self, dirs):
        """Writes the default settings file into every directory in `dirs`."""
        if type(dirs) is Types.text:
            dirs = [dirs]
        for file_path in dirs:
            if os.path.exists(file_path) is False:
                printMessage("Setup: file path does not exist: " + file_path)
                return
        # ST3+ ships resources inside the package archive; ST2 copies a file
        if sublime.version()[0] >= '3':
            content = sublime.load_resource('Packages/FTPSync/ftpsync.default-settings').replace('\r\n', '\n')
            for directory in dirs:
                config = os.path.join(directory, configName)
                if os.path.exists(config) is False:
                    with open(config, 'w') as configFile:
                        printMessage("Settings file created in: " + config)
                        configFile.write(content)
                self.window.open_file(config)
        else:
            default = os.path.join(sublime.packages_path(), 'FTPSync', connectionDefaultsFilename)
            if os.path.exists(default) is False:
                printMessage("Could not find default settings file in {" + default + "}")
                default = os.path.join(__dir__, connectionDefaultsFilename)
                printMessage("Trying filepath {" + default + "}")
            for directory in dirs:
                config = os.path.join(directory, configName)
                invalidateConfigCache(directory)
                if os.path.exists(config) is False:
                    printMessage("Settings file created in: " + config)
                    shutil.copyfile(default, config)
                self.window.open_file(config)
# Synchronize up selected file/directory
class FtpSyncTarget(sublime_plugin.WindowCommand):
    """Uploads the selected files/directories."""

    def run(self, edit, paths):
        gathered = gatherFiles(paths)

        def onPasswordsReady(files):
            RemoteSyncCall(files, None, False).start()

        fillPasswords(gathered, onPasswordsReady, sublime.active_window())
# Synchronize up selected file/directory with delay and watch
class FtpSyncTargetDelayed(sublime_plugin.WindowCommand):
    """Uploads the selected files/directories with delay and watch enabled."""

    def run(self, edit, paths):
        gathered = gatherFiles(paths)

        def onPasswordsReady(files):
            RemoteSyncCall(files, None, True, forcedSave = True).start()

        fillPasswords(gathered, onPasswordsReady, sublime.active_window())
# Synchronize up current file
class FtpSyncCurrent(sublime_plugin.TextCommand):
    """Uploads the file shown in the active view."""

    def run(self, edit):
        file_path = sublime.active_window().active_view().file_name()
        config = getConfigFile(file_path)

        def onPasswordsReady(files):
            RemoteSyncCall(files[0][0], files[0][1], False).start()

        fillPasswords([[ file_path, config ]], onPasswordsReady, sublime.active_window())
# Synchronize down current file
class FtpSyncDownCurrent(sublime_plugin.TextCommand):
    """Downloads the file shown in the active view."""

    def run(self, edit):
        file_path = sublime.active_window().active_view().file_name()
        config = getConfigFile(file_path)

        def onPasswordsReady(files):
            RemoteSyncDownCall(files[0][0], files[0][1], True, False).start()

        fillPasswords([[ file_path, config ]], onPasswordsReady, sublime.active_window())
# Checks whether there's a different version of the file on server
class FtpSyncCheckCurrent(sublime_plugin.TextCommand):
    """Checks whether a different version of the current file exists on a server."""

    def run(self, edit):
        file_path = sublime.active_window().active_view().file_name()
        # this is a Window object, which RemoteSyncCheck expects
        window = sublime.active_window()

        def onPasswordsReady(files):
            RemoteSyncCheck(file_path, window, True).start()

        fillPasswords([[ file_path, getConfigFile(file_path) ]], onPasswordsReady, sublime.active_window())
# Checks whether there's a different version of the file on server
class FtpSyncRenameCurrent(sublime_plugin.TextCommand):
    """Rename the currently open file locally and on the remote server.

    Prompts for a new name; if a local file with that name already exists,
    asks (via a quick panel) whether to overwrite it first.
    """
    def run(self, edit):
        window = sublime.active_window()
        self.original_path = window.active_view().file_name()
        self.folder = os.path.dirname(self.original_path)
        self.original_name = os.path.basename(self.original_path)
        # Drop any pending remote-change check for the file being renamed.
        if self.original_path in checksScheduled:
            checksScheduled.remove(self.original_path)
        window.show_input_panel('Enter new name', self.original_name, self.rename, None, None)
    def rename(self, new_name):
        """Input-panel callback: perform the rename, confirming overwrites."""
        def action():
            def execute(files):
                RemoteSyncRename(self.original_path, getConfigFile(self.original_path), new_name).start()
            fillPasswords([[ self.original_path, getConfigFile(self.original_path) ]], execute, sublime.active_window())
        new_path = os.path.join(os.path.dirname(self.original_path), new_name)
        if os.path.exists(new_path):
            def sync(index):
                # BUG FIX: was `index is 0` -- identity comparison with an int
                # literal is implementation-defined (and a SyntaxWarning on
                # modern Python); compare by value instead.
                if index == 0:
                    printMessage("Renaming: overwriting local target")
                    action()
                else:
                    # index 1 (cancel item) or -1 (panel dismissed)
                    printMessage("Renaming: keeping original")
            overwrite = []
            overwrite.append("Overwrite local file? Already exists in:")
            overwrite.append("Path: " + new_path)
            cancel = []
            cancel.append("Cancel renaming")
            sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([ overwrite, cancel ], sync), 1)
        else:
            action()
# Synchronize down selected file/directory
class FtpSyncDownTarget(sublime_plugin.WindowCommand):
    """Sidebar command: download the selected files/directories."""

    def run(self, edit, paths, forced=False):
        pairs = [[path, getConfigFile(path)] for path in paths]

        def download(entries):
            RemoteSyncDownCall(pairs, None, forced=forced).start()

        fillPasswords(pairs, download, sublime.active_window())
# Renames a file on disk and in folder
class FtpSyncRename(sublime_plugin.WindowCommand):
    """Sidebar command: rename a file locally and on the remote server.

    Same flow as FtpSyncRenameCurrent, but operates on the sidebar
    selection and uses the command's own window for the input panel.
    """
    def run(self, edit, paths):
        self.original_path = paths[0]
        self.folder = os.path.dirname(self.original_path)
        self.original_name = os.path.basename(self.original_path)
        # Drop any pending remote-change check for the file being renamed.
        if self.original_path in checksScheduled:
            checksScheduled.remove(self.original_path)
        self.window.show_input_panel('Enter new name', self.original_name, self.rename, None, None)
    def rename(self, new_name):
        """Input-panel callback: perform the rename, confirming overwrites."""
        def action():
            def execute(files):
                RemoteSyncRename(self.original_path, getConfigFile(self.original_path), new_name).start()
            fillPasswords([[ self.original_path, getConfigFile(self.original_path) ]], execute, sublime.active_window())
        new_path = os.path.join(os.path.dirname(self.original_path), new_name)
        if os.path.exists(new_path):
            def sync(index):
                # BUG FIX: was `index is 0` -- identity comparison with an int
                # literal is implementation-defined (and a SyntaxWarning on
                # modern Python); compare by value instead.
                if index == 0:
                    printMessage("Renaming: overwriting local target")
                    action()
                else:
                    # index 1 (cancel item) or -1 (panel dismissed)
                    printMessage("Renaming: keeping original")
            overwrite = []
            overwrite.append("Overwrite local file? Already exists in:")
            overwrite.append("Path: " + new_path)
            cancel = []
            cancel.append("Cancel renaming")
            # Blank entry pads the cancel item to the same row count as the
            # two-line overwrite item.
            cancel.append("")
            sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([ overwrite, cancel ], sync), 1)
        else:
            action()
# Removes given file(s) or folders
class FtpSyncDelete(sublime_plugin.WindowCommand):
    """Sidebar command: delete the selected files/folders (local + remote)."""
    def run(self, edit, paths):
        filelist = []
        for path in paths:
            filelist.append( [ path, getConfigFile(path) ] )
        def execute(files):
            # NOTE(review): the password-filled `files` argument is ignored and
            # the raw `paths` are passed instead -- presumably RemoteSyncDelete
            # resolves configs itself; confirm this is intentional.
            RemoteSyncDelete(paths).start()
        fillPasswords(filelist, execute, sublime.active_window())
# Remote ftp navigation
class FtpSyncBrowse(sublime_plugin.WindowCommand):
    """Open the remote file browser, rooted at the active file's folder
    (or at a guessed config location when no view is open)."""

    def run(self, edit = None):
        if hasActiveView() is False:
            file_path = os.path.dirname(guessConfigFile(sublime.active_window().folders()))
        else:
            file_path = os.path.dirname(sublime.active_window().active_view().file_name())

        def browse(entries):
            command = SyncNavigator(None, getConfigFile(file_path), None, file_path)
            navigator = RemoteNavigator(getConfigFile(file_path))
            navigator.setCommand(command)
            navigator.start()

        fillPasswords([[file_path, getConfigFile(file_path)]], browse, sublime.active_window())
# Remote ftp navigation
class FtpSyncBrowsePlace(sublime_plugin.WindowCommand):
    """Open the remote file browser rooted at the selected sidebar path."""

    def run(self, edit = None, paths = None):
        # Directories are browsed directly; for files, start at their folder.
        file_path = paths[0] if os.path.isdir(paths[0]) else os.path.dirname(paths[0])

        def browse(entries):
            command = SyncNavigator(None, getConfigFile(file_path), None, file_path)
            navigator = RemoteNavigator(getConfigFile(file_path))
            navigator.setCommand(command)
            navigator.start()

        fillPasswords([[file_path, getConfigFile(file_path)]], browse, sublime.active_window())
# Remote ftp navigation from current file
class FtpSyncBrowseCurrent(sublime_plugin.TextCommand):
    """Open the remote file browser rooted at the current file's folder."""

    def run(self, edit = None):
        if hasActiveView() is False:
            file_path = os.path.dirname(guessConfigFile(sublime.active_window().folders()))
        else:
            file_path = sublime.active_window().active_view().file_name()

        def browse(entries):
            local_folder = os.path.dirname(file_path)
            command = SyncNavigator(local_folder, getConfigFile(file_path), None, local_folder)
            navigator = RemoteNavigator(getConfigFile(file_path))
            navigator.setCommand(command)
            navigator.start()

        fillPasswords([[file_path, getConfigFile(file_path)]], browse, sublime.active_window())
# Remote ftp navigation from last point
class FtpSyncBrowseLast(sublime_plugin.WindowCommand):
    """Reopen the remote browser at the last visited location, falling back
    to the current file's folder when no location is remembered."""
    def run(self, edit = None):
        if navigateLast['config_file'] is None:
            # No previous browsing session: behave like FtpSyncBrowse.
            if hasActiveView() is False:
                file_path = os.path.dirname(guessConfigFile(sublime.active_window().folders()))
            else:
                file_path = sublime.active_window().active_view().file_name()
            def execute(files):
                command = SyncNavigator(None, getConfigFile(file_path), None, file_path)
                call = RemoteNavigator(getConfigFile(file_path))
                call.setCommand(command)
                call.start()
            fillPasswords([[ file_path, getConfigFile(file_path) ]], execute, sublime.active_window())
        else:
            # RemoteNavigator(None, True) resumes from the stored last position.
            def execute(files):
                RemoteNavigator(None, True).start()
            fillPasswords([[ None, getConfigFile(navigateLast['config_file']) ]], execute, sublime.active_window())
# Show connection info
class FtpSyncShowInfo(sublime_plugin.WindowCommand):
    """Show connection info for the configuration governing the selected path."""

    def run(self, edit, paths):
        target = paths[0]

        def show(entries):
            ShowInfo(None, getConfigFile(target)).execute(sublime.active_window())

        fillPasswords([[target, getConfigFile(target)]], show, sublime.active_window())
# Open FTPSync Github page
class FtpSyncUrlReadme(sublime_plugin.WindowCommand):
    """Open the FTPSync GitHub page in a new browser tab."""

    def run(self):
        project_url = "https://github.com/NoxArt/SublimeText2-FTPSync"
        webbrowser.open(project_url, 2, True)
# Open FTPSync Github New Issue page
class FtpSyncUrlReport(sublime_plugin.WindowCommand):
    """Open the FTPSync GitHub "new issue" page in a new browser tab."""

    def run(self):
        issue_url = "https://github.com/NoxArt/SublimeText2-FTPSync/issues/new"
        webbrowser.open(issue_url, 2, True)
# Open FTPSync Donate page
class FtpSyncUrlDonate(sublime_plugin.WindowCommand):
    """Open the FTPSync donation page in a new browser tab."""

    def run(self):
        donate_url = "http://ftpsync.noxart.cz/donate.html"
        webbrowser.open(donate_url, 2, True)
# Base class for option toggling
class FTPSyncToggleSettings(sublime_plugin.TextCommand):
    """Base command for toggling a boolean per-connection config option.

    Subclasses define:
      property_name       -- the config key to toggle
      property_value_from -- value written when the command is invoked
      property_value_to   -- current value for which the command is visible
    """
    def run(self, edit):
        config_file_path = getConfigFile(self.view.file_name())
        if config_file_path is None:
            return printMessage("No config file found")
        overrideConfig(config_file_path, self.property_name, self.property_value_from)
    def is_visible(self):
        # Visible only when at least one connection currently holds the value
        # that this command would change away from.
        if self.view is None or self.view.file_name() is None:
            return False
        config_file_path = getConfigFile(self.view.file_name())
        if config_file_path is None:
            return False
        config = loadConfig(config_file_path)
        if type(config) is not dict:
            return False
        for name in config['connections']:
            # BUG FIX: this lookup was hardcoded to 'upload_on_save', breaking
            # any subclass that toggles a different property; using .get() also
            # avoids a KeyError for connections that omit the key.
            if config['connections'][name].get(self.property_name) is self.property_value_to:
                return True
        return False
# Alters overrideConfig to enable upload_on_save
class FtpSyncEnableUos(FTPSyncToggleSettings):
    # Visible while upload_on_save is False; invoking it writes True.
    property_name = 'upload_on_save'
    property_value_from = True
    property_value_to = False
# Alters overrideConfig to disable upload_on_save
class FtpSyncDisableUos(FTPSyncToggleSettings):
    # Visible while upload_on_save is True; invoking it writes False.
    property_name = 'upload_on_save'
    property_value_from = False
    property_value_to = True
class FtpSyncCleanup(sublime_plugin.WindowCommand):
    """Find and delete leftover *.ftpsync.temp files under the selected paths."""
    def run(self, edit, paths):
        self.files = []
        for path in paths:
            # NOTE(review): the result is extend()ed, so gatherMetafiles() is
            # assumed to yield file paths -- confirm its return type.
            self.files.extend(gatherMetafiles('*.ftpsync.temp', path))
        self.prompt()
    def prompt(self):
        """Ask for confirmation via a two-item quick panel."""
        if len(self.files) == 0:
            printMessage("No temporary files found")
            return
        toRemove = []
        toRemove.append("Remove these temporary files?")
        for path in self.files:
            toRemove.append(os.path.join(os.path.dirname(path), os.path.basename(path)))
        cancel = []
        cancel.append("Cancel removal")
        # Pad the cancel item with blank rows so both panel items have the
        # same number of lines.
        for path in self.files:
            cancel.append("")
        sublime.set_timeout(lambda: sublime.active_window().show_quick_panel([ toRemove, cancel ], self.remove), 1)
    def remove(self, index):
        """Quick-panel callback: index 0 confirms deletion."""
        if hasattr(self, 'files') and index == 0:
            for path in self.files:
                os.remove(path)
                printMessage("Removed tempfile: " + path)
| NoxArt/SublimeText2-FTPSync | FTPSync.py | Python | mit | 100,641 |
# 2015.03.10 14:42:13 UTC
import sys
import os
path = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(path + '/utils/Python_Utils')
sys.path.append(path + '../utils/Python_Utils')
sys.path.append(path + '/utils/liac-arff')
sys.path.append(path + '../utils/liac-arff')
import numpy as np
import scipy as sc
import scipy.sparse as sp
import logging,Logger
label_flag = u'multi_label_';
class SvmWriter:
    """Writes multi-label data in svm format.

    The file starts with a '#num_feature=F num_label=L' header line;
    each data row is '<l1,l2,...> <col>:<val> <col>:<val> ...'.
    """

    def __init__(self, filename, num_feature, num_label):
        self.file = open(filename, "w")
        header = "#num_feature=%d num_label=%d\n" % (num_feature, num_label)
        self.file.write(header)

    def write(self, x, y):
        """Write one row per instance of sparse features `x` and labels `y`."""
        num_rows = x.shape[0]
        feature_strs = [[] for _ in range(num_rows)]
        label_strs = [[] for _ in range(num_rows)]
        for r, c in zip(*x.nonzero()):
            feature_strs[r].append("%d:%f" % (c, x[r, c]))
        for r, c in zip(*y.nonzero()):
            label_strs[r].append("%d" % c)
        for r in range(num_rows):
            row = ",".join(label_strs[r]) + " " + " ".join(feature_strs[r]) + "\n"
            self.file.write(row)

    def close(self):
        self.file.close()
class SvmReader:
    """Reads multi-label svm-format data written by SvmWriter.

    The first line must be a header comment '#num_feature=F num_label=L'.
    Each following line is '<l1,l2,...> <col>:<val> ...'; the label list is
    optional (a line whose first token contains ':' has no labels).
    """

    def __init__(self, filename, batch = 50):
        self.file = open(filename)
        self.batch = batch          # lines consumed per read() call
        self.num_label = 0
        self.num_feature = 0
        # One-batch lookahead so read() can report has_next.
        self.next_x = None
        self.next_y = None
        ##read the comment line
        ## the comment line should give num_feature and num_labels
        ## for example '#num_feature=6\tnum_label=10'
        line = self.file.readline()
        line = line.strip()
        line = line.replace("#", "")
        eles = line.split(" ")
        self.num_feature = int((eles[0].split("="))[1])
        self.num_label = int((eles[1].split("="))[1])

    def parse(self, lines):
        """Parse text lines into sparse csr matrices (features, labels).

        Returns (None, None) when `lines` is empty.
        """
        num_ins = len(lines)
        if num_ins == 0:
            return None, None
        # Coordinate lists for the csr constructors.
        xr = []
        xc = []
        xd = []
        yr = []
        yc = []
        yd = []
        for i in range(len(lines)):
            line = lines[i].strip()
            eles = line.split(" ")
            if ":" not in eles[0]:
                # First token is the comma separated label list.
                for j in range(1, len(eles)):
                    kv = eles[j].split(":")
                    xr.append(i)
                    xc.append(int(kv[0]))
                    xd.append(float(kv[1]))
                labels = eles[0].strip().split(",")
                for j in range(len(labels)):
                    yr.append(i)
                    yc.append(int(labels[j]))
                    yd.append(1)
            else:
                # No labels on this line; every token is a feature.
                for j in range(0, len(eles)):
                    kv = eles[j].split(":")
                    xr.append(i)
                    xc.append(int(kv[0]))
                    xd.append(float(kv[1]))
        xi = sp.csr_matrix((xd, (xr, xc)), (num_ins, self.num_feature))
        yi = sp.csr_matrix((yd, (yr, yc)), (num_ins, self.num_label))
        return xi, yi

    def full_read(self):
        """Read all remaining lines at once (stops at the first blank line)."""
        lines = []
        for line in self.file:
            if line is None or len(line.strip()) == 0: break
            lines.append(line.strip())
        return self.parse(lines)

    def _read_batch(self):
        """Read and parse up to self.batch lines (stops at the first blank line)."""
        lines = []
        for i in range(self.batch):
            line = self.file.readline()
            if line is None or len(line.strip()) == 0: break
            lines.append(line)
        return self.parse(lines)

    def read(self):
        """Return (x, y, has_next) for the next batch of instances.

        BUG FIX: the lookahead test used `None == self.next_x`, which asks a
        scipy sparse matrix for an element-wise comparison with None instead
        of an identity check; use `is None` / `is not None` instead.
        """
        if self.next_x is None:
            self.next_x, self.next_y = self._read_batch()
        x = self.next_x
        y = self.next_y
        self.next_x, self.next_y = self._read_batch()
        has_next = self.next_x is not None
        return x, y, has_next

    def close(self):
        self.file.close()
| rustle1314/latent_factor_multi_label | arffio.py | Python | mit | 4,722 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, print_function
"""
This module applies an old style bom submission to a bom file in
"oe/palm/conf/distro/nova-bom.inc" which is assumed to hold a nova
classic style bom.
"""
import os
import ZAbom
def getopts():
    """Parse command line arguments for the speculator.

    Normalizes a "Name <user@host>" requester to the bare email address and
    strips one layer of surrounding single quotes from the speculation.
    """
    import argparse
    parser = argparse.ArgumentParser(description='apply a submission.')
    for positional in ('requester', 'bomfile', 'speculation'):
        parser.add_argument(positional)
    options = parser.parse_args()
    if '<' in options.requester:
        # Keep only the part between the angle brackets.
        _, _, tail = options.requester.partition('<')
        options.requester = tail.rpartition('>')[0]
    spec = options.speculation
    if spec[0] == "'" and spec[-1] == "'":
        options.speculation = spec[1:-1]
    return options
if __name__ == '__main__':
    options = getopts()
    # dereference=False: operate on bom entries as written
    # (assumption -- confirm against ZAbom.bom's documentation).
    bom = ZAbom.bom(options.bomfile, dereference=False)
    owners = bom.speculate(options.speculation, options.requester)
    # Write to a temp file first, then rename over the original, so a failed
    # write cannot leave a truncated bom file behind.
    tmpfile = options.bomfile + '-new'
    bom.write(tmpfile)
    os.rename(tmpfile, options.bomfile)
    print(owners)
| rich-pixley/zoo-animals | za-pre/Speculator.py | Python | apache-2.0 | 1,799 |
"""Display a "Pastebin" menu that allows you to pastebin files easily.
If a part of the file is selected when you click something in the "Pastebin"
menu, then only the selected part of the file is shared.
"""
# TODO: make this work with pythonprompt plugin?
from __future__ import annotations
import logging
import socket
import ssl
import tkinter
import webbrowser
from functools import partial
from http.client import HTTPConnection, HTTPSConnection
from tkinter import messagebox, ttk
from typing import Any, ClassVar, Type, cast
from urllib.parse import urlencode
from urllib.request import HTTPSHandler, Request, build_opener
from pygments.lexer import LexerMeta
from porcupine import get_main_window, menubar, tabs, utils
# Module-level logger for this plugin.
log = logging.getLogger(__name__)
# dpaste REST endpoint (POST target); API docs live at this same URL.
DPASTE_URL = "https://dpaste.com/api/v2/"
# termbin speaks a raw "send bytes, receive URL" TCP protocol on this port.
TERMBIN_HOST_AND_PORT = ("termbin.com", 9999)
class Paste:
    """Base class for a pastebin service.

    Subclasses set `name` and implement get_socket() and run(); cancel()
    aborts an in-flight paste by shutting down the underlying socket.
    """
    name: ClassVar[str]
    def __init__(self) -> None:
        self.canceled = False
    def get_socket(self) -> socket.socket | ssl.SSLSocket | None:
        """Return the socket of the paste in progress, or None if not connected yet."""
        raise NotImplementedError
    # runs in a new thread
    def run(self, code: str, lexer_class: LexerMeta) -> str:
        """Upload `code` and return the resulting URL (blocking)."""
        raise NotImplementedError
    def cancel(self) -> bool:
        """Try to abort the paste; return True if a socket was shut down."""
        sock = self.get_socket()
        if sock is None:
            log.info("can't cancel yet")
            return False
        log.debug("canceling (shutting down socket)")
        sock.shutdown(socket.SHUT_RDWR)
        log.debug("canceling done")
        self.canceled = True
        return True
class Termbin(Paste):
    """Paste to termbin.com via its raw TCP protocol: send the code, read back the URL."""
    name = "termbin.com"
    def __init__(self) -> None:
        super().__init__()
        self._socket: socket.socket | None = None
    def get_socket(self) -> socket.socket | None:
        return self._socket
    def run(self, code: str, lexer_class: LexerMeta) -> str:
        # lexer_class is unused: termbin has no syntax-highlighting support.
        with socket.socket() as self._socket:
            self._socket.connect(TERMBIN_HOST_AND_PORT)
            self._socket.sendall(code.encode("utf-8"))
            url = self._socket.recv(1024)
        # termbin has been seen to append zero bytes to the URL; it has not
        # been seen to add \r, but stripping it too is harmless.
        return url.rstrip(b"\n\r\0").decode("ascii")
# Hello there, random person reading my code. You are probably wondering why in
# the world I am using urllib instead of requests.
#
# It doesn't seem to be possible to access the underlying socket that requests
# uses without relying on _methods_named_like_this. We need that socket for
# canceling the pastebinning. For example, https://stackoverflow.com/a/32311849
# is useless because it gives the socket after it's connected, and most of the
# pastebinning time is spent connecting the socket (on my system).
class MyHTTPConnection(HTTPConnection):
    """HTTPConnection that exposes its socket before connecting, so the
    (slow) connect phase can be canceled from another thread."""
    def connect(self) -> None:
        # Unlike HTTPConnection.connect, this creates the socket so that it is
        # assigned to self.sock before it's connected.
        self.sock: socket.socket | ssl.SSLSocket = socket.socket()
        self.sock.connect((self.host, self.port))
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# HTTPSConnection does super().connect(), which calls MyHTTPConnection.connect,
# and then it SSL-wraps the socket created by MyHTTPConnection.
class MyHTTPSConnection(HTTPSConnection, MyHTTPConnection):
    """HTTPS variant that reports its SSL socket back to the owning DPaste
    instance as soon as the socket is SSL-wrapped (see the sock setter)."""
    def __init__(self, *args: Any, dpaste: DPaste, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self._dpaste = dpaste
    # https://github.com/python/mypy/issues/10049
    @property # type: ignore
    def sock(self) -> socket.socket | ssl.SSLSocket: # type: ignore
        return self.__sock
    @sock.setter
    def sock(self, new_sock: socket.socket | ssl.SSLSocket) -> None:
        # Canceling with the non-SSL socket fails because making the SSL socket
        # closes the non-SSL socket. So, don't tell the dpaste object about
        # being able to cancel until self.sock is set to SSL socket.
        self.__sock = new_sock
        if isinstance(new_sock, ssl.SSLSocket):
            self._dpaste.connection = self
class DPaste(Paste):
    """Paste to dpaste.com through its HTTP API."""
    name = "dpaste.com"
    def __init__(self) -> None:
        super().__init__()
        # Set by MyHTTPSConnection once the SSL socket exists (sock setter).
        self.connection: MyHTTPSConnection | None = None
    def get_socket(self) -> ssl.SSLSocket | None:
        """Return the SSL socket of the request in progress, if any."""
        if self.connection is None:
            return None
        return cast(ssl.SSLSocket, self.connection.sock)
    def run(self, code: str, lexer_class: LexerMeta) -> str:
        """POST `code` to dpaste and return the paste URL it responds with."""
        # kwargs of do_open() go to MyHTTPSConnection
        handler = HTTPSHandler()
        handler.https_open = partial(handler.do_open, MyHTTPSConnection, dpaste=self) # type: ignore
        # docs: https://dpaste.com/api/v2/
        # dpaste.com's syntax highlighting choices correspond with pygments lexers (see tests)
        request = Request(
            DPASTE_URL,
            data=urlencode({"syntax": lexer_class.aliases[0], "content": code}).encode("utf-8"),
        )
        with build_opener(handler).open(request) as response:
            return response.read().decode().strip()
class SuccessDialog(tkinter.Toplevel):
    """Dialog showing the pasted URL with open/copy/close buttons."""
    def __init__(self, url: str):
        super().__init__()
        self.url = url  # accessed in tests
        content = ttk.Frame(self, padding=10)
        content.pack(fill="both", expand=True)
        content.columnconfigure(0, weight=1)
        label = ttk.Label(content, text="Here's your link:")
        label.grid(row=0, column=0)
        # Read-only entry so the URL can be selected/copied but not edited.
        self._entry = ttk.Entry(content, justify="center")
        self._entry.grid(row=1, column=0, sticky="we", pady=(10, 30))
        self._entry.insert(0, url)
        self._entry.config(state="readonly")  # must be after the insert
        self.bind("<FocusIn>", self._select_all, add=True)
        self._select_all()
        button_info = [
            ("Open in browser", self.open_in_browser),
            ("Copy to clipboard", self.copy_to_clipboard),
            ("Close this dialog", self.destroy),
        ]
        buttonframe = ttk.Frame(content)
        buttonframe.grid(row=2, column=0, sticky="we")
        for (text, callback), padx in zip(button_info, [(0, 5), (5, 5), (5, 0)]):
            ttk.Button(buttonframe, text=text, command=callback).pack(
                side="left", expand=True, fill="x", padx=padx
            )
    def _select_all(self, event: tkinter.Event[tkinter.Misc] | None = None) -> None:
        """Select the whole URL whenever the dialog gains focus."""
        # toplevels annoyingly get notified of child events
        if event is None or event.widget is self:
            self._entry.selection_range(0, "end")
            self._entry.focus()
    def open_in_browser(self) -> None:
        """Open the URL in the default browser and close the dialog."""
        webbrowser.open(self.url)
        self.destroy()
    def copy_to_clipboard(self) -> None:
        """Replace the clipboard contents with the URL."""
        self.clipboard_clear()
        self.clipboard_append(self.url)
def make_please_wait_window(paste: Paste) -> tkinter.Toplevel:
    """Create the modal progress window shown while `paste` is running.

    Closing the window (or pressing Cancel) calls paste.cancel(). The main
    window is marked busy here and released in pasting_done_callback().
    """
    window = tkinter.Toplevel()
    window.transient(get_main_window())
    window.title("Pasting...")
    window.geometry("350x150")
    window.resizable(False, False)
    window.protocol("WM_DELETE_WINDOW", paste.cancel)
    content = ttk.Frame(window)
    content.pack(fill="both", expand=True)
    label = ttk.Label(
        content, font=("", 12, ()), text=f"Pasting to {type(paste).name}, please wait..."
    )
    label.pack(expand=True)
    progressbar = ttk.Progressbar(content, mode="indeterminate")
    progressbar.pack(fill="x", padx=15, pady=15)
    progressbar.start()
    ttk.Button(content, text="Cancel", command=paste.cancel).pack(pady=15)
    # Block interaction with the main window until pasting finishes.
    get_main_window().tk.call("tk", "busy", "hold", get_main_window())
    return window
def pasting_done_callback(
    paste: Paste, please_wait_window: tkinter.Toplevel, success: bool, result: str
) -> None:
    """Runs on the main thread after paste.run() finishes.

    `result` is the URL on success, or the traceback string on failure.
    Releases the busy main window, closes the progress window, then shows
    either the success dialog or an error box (unless the user canceled).
    """
    get_main_window().tk.call("tk", "busy", "forget", get_main_window())
    please_wait_window.destroy()
    if success:
        if result.startswith(("http://", "https://")):
            log.info("pasting succeeded")
            dialog = SuccessDialog(url=result)
            dialog.title("Pasting Succeeded")
            dialog.resizable(False, False)
            dialog.transient(get_main_window())
            dialog.wait_window()
        else:
            log.error(f"pastebin {paste.name!r} returned invalid url: {result!r}")
            messagebox.showerror(
                "Pasting failed", f"Instead of a valid URL, {type(paste).name} returned {result!r}."
            )
    elif paste.canceled:
        # Log error with less dramatic log level and don't show in GUI
        log.debug("Pasting failed and was cancelled. Here is the error.", exc_info=True)
    else:
        # result is the traceback as a string
        log.error(f"pasting failed\n{result}")
        messagebox.showerror(
            "Pasting failed", "Check your internet connection or try a different pastebin."
        )
def start_pasting(paste_class: Type[Paste], tab: tabs.FileTab) -> None:
    """Pastebin the tab's selection (or the whole file) using `paste_class`.

    Runs the upload in a background thread; pasting_done_callback() handles
    the result on the main thread.
    """
    lexer_class = tab.settings.get("pygments_lexer", LexerMeta)
    try:
        code = tab.textwidget.get("sel.first", "sel.last")
    except tkinter.TclError:
        # nothing is selected, pastebin everything
        code = tab.textwidget.get("1.0", "end - 1 char")
    paste = paste_class()
    plz_wait = make_please_wait_window(paste)
    utils.run_in_thread(
        partial(paste.run, code, lexer_class), partial(pasting_done_callback, paste, plz_wait)
    )
def setup() -> None:
    """Register one "Pastebin" menu entry per supported pastebin service."""
    for paste_class in (DPaste, Termbin):
        # A slash would be interpreted as a submenu separator in the path.
        assert "/" not in paste_class.name
        menubar.add_filetab_command(f"Pastebin/{paste_class.name}", partial(start_pasting, paste_class))
#!/usr/bin/env python
# PyCal - Python web calendar
#
# Copyright (C) 2004-6 Ray Osborn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: ModifyEvent.py,v 1.5 2005/06/12 04:30:42 osborn Exp $
#
"""
CGI script to modify a calendar event.
"""
from pycal.PyCal import *
from pycal.Editor import Contact
from pycal.Event import Event, TemporaryEvent
from pycal.GetModule import GetEditors, GetEditor, GetSupervisors, GetContacts
from pycal.PrintModule import LoginPage, ErrorPage, DayView
from pycal.CGImodule import CGIlogin, CGIgetForm
from pycal.Utilities import ReadDate, ReadTime, Today, MakeRepeats, Now, AddDay
from pycal.Utilities import IDexists, IDdate, ConvertCRLFs
def main():
    """CGI entry point: create or update a calendar event from form input.

    Reads the submitted CGI form, authenticates the user, populates a
    TemporaryEvent from the form fields (times, contact, reservation,
    locations/resources/categories, repeat pattern), stores it and prints
    the event view -- or an error page on CalendarError.
    """
    try:
        form = CGIgetForm()
        user = CGIlogin(form)
        if user == "admin" or user in GetSupervisors():
            supervisor = True
        else:
            supervisor = False
        if user is None:
            print LoginPage(script="ModifyEvent.py", form=form)
            return
        # Editing an existing event: copy it into a TemporaryEvent so the
        # stored event is untouched until the user saves.
        if form.has_key("ID"):
            ID = form["ID"]
            if not IDexists(ID):
                raise CalendarError, "No event to edit"
            if ID.find("tmp") >= 0:
                e = TemporaryEvent(ID)
            else:
                e = TemporaryEvent()
                e.Copy(Event(ID))
                e.oldID = ID
                e.oldDate = e.start[0:3]
        else:
            e = TemporaryEvent()
        if form.has_key("title"):
            e.title = form["title"]
        if form.has_key("type"):
            e.type = form["type"]
        else:
            e.type = "Event"
        if form.has_key("description"):
            e.description = ConvertCRLFs(form["description"])
        else:
            e.description = ""
        if form.has_key("location"):
            if form["location"] == "Location...":
                e.location = ""
            else:
                e.location = form["location"]
        # Parse the start/end times; a missing field is only an error for a
        # brand-new event (edits keep their existing times).
        try:
            # NOTE(review): `type` here is the BUILTIN, not e.type, so this
            # branch can never be taken -- almost certainly meant
            # `e.type == "Banner" or e.type == "Holiday"`; confirm and fix.
            if type == "Banner" or type == "Holiday":
                e.start = ReadTime(form["startyear"], form["startmonth"],
                                   form["startday"], 12, 0, "AM")
                e.end = e.start
            else:
                e.start = ReadTime(form["startyear"], form["startmonth"],
                                   form["startday"], form["starthour"],
                                   form["startminute"],
                                   form["startampm"].upper())
                e.end = ReadTime(form["startyear"], form["startmonth"],
                                 form["startday"], form["endhour"],
                                 form["endminute"],
                                 form["endampm"].upper(),
                                 end=True)
        except KeyError:
            if not hasattr(e, "oldID"):
                raise CalendarError, "Invalid date/time entry"
        # Cancel button: show the day view for the event's (or today's) date.
        if form.has_key("cancel"):
            if e.start:
                y, m, d = e.start[0:3]
            else:
                y, m, d = Today()
            print DayView(y, m, d)
            return
        # Resolve the organizer's contact information, preferring a selected
        # name, then explicit organizer/phone/email fields, then lookups.
        if form.has_key("name") and form["name"] <> "Name...":
            if form["name"] in GetEditors(name=True):
                c = GetEditor(form["name"])
            else:
                c = Contact(form["name"])
            e.organizer = c.name
            e.phone = c.phone
            e.email = c.email
        elif form.has_key("organizer"):
            e.organizer = form["organizer"]
            if form.has_key("phone") and form.has_key("email"):
                e.phone = form["phone"]
                e.email = form["email"]
            else:
                if e.organizer in GetEditors(name=True):
                    c = GetEditor(e.organizer)
                elif e.organizer in GetContacts():
                    c = Contact(e.organizer)
                else:
                    c = None
                if form.has_key("phone"):
                    e.phone = form["phone"]
                elif c:
                    e.phone = c.phone
                if form.has_key("email"):
                    e.email = form["email"]
                elif c:
                    e.email = c.email
        else:
            e.organizer = ""
        # Reservation times; widened if necessary so they always enclose the
        # event itself.
        try:
            e.reservation["option"] = form["reserve"]
            if e.reservation["option"] == "Same as Event":
                e.reservation["start"] = e.start
                e.reservation["end"] = e.end
            elif e.reservation["option"] == "All Day":
                e.reservation["start"] = ReadTime(form["startyear"],
                                                  form["startmonth"],
                                                  form["startday"],
                                                  "12", "00", "AM")
                e.reservation["end"] = ReadTime(form["startyear"],
                                                form["startmonth"],
                                                form["startday"],
                                                "12", "00", "AM",
                                                end=True)
            else:
                e.reservation["start"] = ReadTime(form["startyear"],
                                                  form["startmonth"],
                                                  form["startday"],
                                                  form["resstarthour"],
                                                  form["resstartminute"],
                                                  form["resstartampm"].upper())
                e.reservation["end"] = ReadTime(form["startyear"],
                                                form["startmonth"],
                                                form["startday"],
                                                form["resendhour"],
                                                form["resendminute"],
                                                form["resendampm"].upper(),
                                                end=True)
            if e.reservation["start"] > e.start:
                e.reservation["start"] = e.start
            if e.reservation["end"] < e.end:
                e.reservation["end"] = e.end
        except KeyError:
            if not hasattr(e, "oldID"):
                raise CalendarError, "Invalid reservation time"
        # Multi-select fields come back as a string for a single selection
        # and a list for several; normalize to lists and drop placeholders.
        if form.has_key("locations"):
            if isinstance(form["locations"], list):
                e.locations = form["locations"]
            else:
                e.locations = [form["locations"]]
            if "Locations..." in e.locations:
                e.locations.remove("Locations...")
        else:
            e.locations = []
        if e.location:
            # Keep the primary location first in the list, without duplicates.
            try:
                e.locations.remove(e.location)
            except ValueError:
                pass
            e.locations.insert(0, e.location)
        if form.has_key("resources"):
            if isinstance(form["resources"], list):
                e.resources = form["resources"]
            else:
                e.resources = [form["resources"]]
            if "Resources..." in e.resources:
                e.resources.remove("Resources...")
        else:
            e.resources = []
        if form.has_key("categories"):
            if isinstance(form["categories"], list):
                e.categories = form["categories"]
            else:
                e.categories = [form["categories"]]
            if "Categories..." in e.categories:
                e.categories.remove("Categories...")
        else:
            e.categories = []
        if form.has_key("setup"):
            e.setup = ConvertCRLFs(form["setup"])
        else:
            e.setup = ""
        if form.has_key("status"):
            e.status = form["status"]
        if form.has_key("editor"):
            e.editor = form["editor"]
        # Repeat handling: a new pattern regenerates the repeat IDs; the
        # "repeat" field instead selects which existing repeats to keep.
        if form.has_key("pattern"):
            e.pattern = form["pattern"]
            if form["pattern"] <> "Once Only":
                e.number, e.final = None, None
                if form.has_key("number"):
                    e.number = int(form["number"])
                if form.has_key("endyear"):
                    e.final = ReadDate(form["endyear"], form["endmonth"],
                                       form["endday"])
                e.repeats = MakeRepeats(e.pattern, e.start, e.final, e.number)
            else:
                e.repeats = []
        elif form.has_key("repeat"):
            if form["repeat"] == "single":
                e.repeats = []
                e.pattern = "Once Only"
            elif form.has_key("repeats"):
                if isinstance(form["repeats"], list):
                    e.repeats = form["repeats"]
                else:
                    e.repeats = [form["repeats"]]
                if form["repeat"] == "future":
                    e.repeats = filter(lambda ID:IDdate(ID)>=Today(), e.repeats)
            else:
                e.repeats = []
        # Sanity checks: warnings are shown on the event view, not fatal.
        message = None
        if e.type <> "Banner" and e.type <> "Holiday" and e.end < e.start:
            message = "Warning: End of event precedes the start."
        elif e.type <> "Banner" and e.type <> "Holiday" and e.end == e.start:
            message = "Warning: Event has no duration."
        elif e.start < Now():
            message = "Warning: This event is in the past."
        elif not e.title:
            message = "Warning: No title given for event."
        if hasattr(e, "oldDate"):
            if e.oldDate <> e.start[0:3] and e.repeats:
                message = \
                "Warning: Changing the date will break the link with event repeats."
                e.repeats = []
                e.pattern = "Once Only"
        e.Store()
        print e.EventView(message=message, updating=True)
    except CalendarError, errorText:
        print ErrorPage(errorText)
if __name__ == "__main__":
main()
| rayosborn/pycal | scripts/ModifyEvent.py | Python | lgpl-3.0 | 10,619 |
"""Evaluation for classification"""
import datetime
import time
def confusion_matrix(model, words, labels, classorder):
    """Build a confusion matrix for `model` on the given test data.

    Returns a pair (matrix, elapsed): `matrix[actual][predicted]` counts how
    often an instance with true class `actual` was predicted as `predicted`,
    and `elapsed` is the prediction wall time as a datetime.timedelta.
    """
    matrix = {actual: {predicted: 0 for predicted in classorder}
              for actual in classorder}
    # Time how long it takes to apply the model to the test documents.
    started = time.time()
    predictions = model.predict(words)
    finished = time.time()
    elapsed = datetime.timedelta(seconds=finished - started)
    for predicted, actual in zip(predictions, labels):
        matrix[actual][predicted] += 1
    return matrix, elapsed
def accuracy(confusion_matrix):
    """Return the fraction of predictions on the matrix diagonal
    (i.e. the true positive rate over all classes)."""
    correct = 0
    seen = 0
    for true_label, row in confusion_matrix.items():
        for predicted, count in row.items():
            seen += count
            if predicted == true_label:
                correct += count
    return correct / seen
#!/usr/bin/env python
#
# Copyright (c) Pivotal Inc 2014. All Rights Reserved.
#
import os
import re
import shutil
import unittest
from collections import defaultdict
from gppylib.gpversion import GpVersion
from gppylib.commands.base import Command, CommandResult, ExecutionError
from mock import patch, MagicMock, Mock, mock_open
from gppylib.operations.persistent_rebuild import ValidateContentID, DbIdInfo, GetDbIdInfo, BackupPersistentTableFiles,\
RebuildTable, RebuildPersistentTables, ValidatePersistentBackup,\
RunBackupRestore, ValidateMD5Sum
# Flags flipped by individual tests to make pt_query_side_effect() simulate
# a missing per-database / global persistent table entry.
remove_per_db_pt_entry = False
remove_global_pt_entry = False
def pt_query_side_effect(*args, **kwargs):
    """Mock side effect for persistent-table catalog queries.

    Dispatches on the SQL text in args[1] and returns canned rows; the
    module-level remove_*_pt_entry flags simulate a missing entry.
    """
    # missing the global persistent table entry
    GET_ALL_DATABASES = """select oid, datname from pg_database"""
    PER_DATABASE_PT_FILES_QUERY = """SELECT relfilenode FROM pg_class WHERE oid IN (5094, 5095)"""
    GLOBAL_PT_FILES_QUERY = """SELECT relfilenode FROM pg_class WHERE oid IN (5090, 5091, 5092, 5093)"""
    if args[1] == GET_ALL_DATABASES:
        return [[123, 'db1']]
    elif args[1] == PER_DATABASE_PT_FILES_QUERY:
        if remove_per_db_pt_entry:
            return [[5095]]
        else:
            return [[5094], [5095]]
    else:
        # Any other query is assumed to be the global PT files query.
        if remove_global_pt_entry:
            return [[5091], [5092], [5093]]
        else:
            return [[5090], [5091], [5092], [5093]]
class ValidateContentIDTestCase(unittest.TestCase):
    def setUp(self):
        # A bare validator; individual tests fill in contentid_file as needed.
        self.contentid_validator = ValidateContentID(content_id=None,
                                                     contentid_file=None,
                                                     gparray=None)
    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_valid_content_ids(self, mock1):
        # One integer per line parses into a list of ints.
        expected = [1, 2, 3]
        file_contents = '1\n2\n3'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())
    @patch('os.path.isfile', return_value=True)
    def test_validate_contentid_file_with_spaces_content_ids(self, mock1):
        # Surrounding whitespace must be stripped from each content id.
        expected = [1, 2, 3]
        file_contents = ' 1\n2 \n3 \n'
        self.contentid_validator.contentid_file = '/tmp/contentid_file'
        m = MagicMock()
        m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
        with patch('__builtin__.open', m, create=True):
            self.assertEqual(expected, self.contentid_validator._validate_contentid_file())
@patch('os.path.isfile', return_value=True)
def test_validate_contentid_file_with_invalid_content_ids(self, mock1):
expected = [1, 2, 3]
file_contents = '1\nb\n3'
self.contentid_validator.contentid_file = '/tmp/contentid_file'
m = MagicMock()
m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
with patch('__builtin__.open', m, create=True):
with self.assertRaisesRegexp(Exception, 'Found non integer content id "b" in contentid file "/tmp/contentid_file"'):
self.contentid_validator._validate_contentid_file()
@patch('os.path.isfile', return_value=True)
def test_validate_contentid_file_with_empty_file(self, mock1):
file_contents = ''
self.contentid_validator.contentid_file = '/tmp/contentid_file'
m = MagicMock()
m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
with patch('__builtin__.open', m, create=True):
with self.assertRaisesRegexp(Exception, 'Please make sure there is atleast one integer content ID in the file'):
self.contentid_validator._validate_contentid_file()
@patch('os.path.isfile', return_value=False)
def test_validate_contentid_file_with_non_existent_file(self, mock1):
expected = [1, 2, 3]
file_contents = '1\nb\n3'
self.contentid_validator.contentid_file = '/tmp/contentid_file'
with self.assertRaisesRegexp(Exception, 'Unable to find contentid file "/tmp/contentid_file"'):
self.contentid_validator._validate_contentid_file()
@patch('os.path.isfile', return_value=True)
def test_validate_contentid_file_with_blank_lines(self, mock1):
expected = [1, 2]
file_contents = '1\n\n\n2'
self.contentid_validator.contentid_file = '/tmp/contentid_file'
m = MagicMock()
m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
with patch('__builtin__.open', m, create=True):
self.assertEqual(expected, self.contentid_validator._validate_contentid_file())
@patch('os.path.isfile', return_value=True)
def test_validate_contentid_file_with_negative_integers(self, mock1):
expected = [-1, 2]
file_contents = '-1\n2'
self.contentid_validator.contentid_file = '/tmp/contentid_file'
m = MagicMock()
m.return_value.__enter__.return_value.__iter__.return_value = iter(file_contents.split())
with patch('__builtin__.open', m, create=True):
self.assertEqual(expected, self.contentid_validator._validate_contentid_file())
def test_validate_content_id_with_valid_segments(self):
expected = [1, 2, 3]
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId = Mock()
m.getSegmentContentId.return_value = (i % 3) + 1
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [1, 2, 3]
self.assertEqual(expected, self.contentid_validator._validate_content_id())
def test_validate_content_id_with_invalid_segments(self):
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId = Mock()
m.getSegmentContentId.return_value = i + 10
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [1, 2, 3]
with self.assertRaisesRegexp(Exception, 'The following content ids are not present in gp_segment_configuration: 1, 2, 3'):
self.contentid_validator._validate_content_id()
def test_validate_content_id_with_primary_segment_down(self):
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId = Mock()
m.getSegmentContentId.return_value = (i % 3) + 1
if i == 1:
m.getSegmentStatus = Mock()
m.getSegmentStatus.return_value = 'd'
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [1, 2, 3]
self.contentid_validator._validate_content_id()
def test_validate_content_id_with_resync(self):
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId.return_value = (i % 3) + 1
m.getSegmentStatus.return_value = 'u'
if i == 1:
m.getSegmentMode.return_value = 'r'
else:
m.getSegmentMode.return_value = 's'
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [1, 2, 3]
with self.assertRaisesRegexp(Exception, 'Can not rebuild persistent tables for content ids that are in resync mode'):
self.contentid_validator._validate_content_id()
@patch('gppylib.operations.persistent_rebuild.ask_yesno', return_value=False)
def test_validate_content_id_with_some_others_resync(self, mock1):
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId.return_value = (i % 3) + 1
m.getSegmentStatus.return_value = 'u'
if m.getSegmentContentId.return_value in (1, 2):
m.getSegmentMode.return_value = 'r'
else:
m.getSegmentMode.return_value = 's'
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [3]
with self.assertRaisesRegexp(Exception, 'Aborting rebuild due to user request'):
self.contentid_validator._validate_content_id()
def test_validate_content_id_with_change_tracking_segments(self):
mock_segs = []
for i in range(6):
m = Mock()
m.getSegmentContentId = Mock()
m.getSegmentContentId.return_value = (i % 3) + 1
if i == 1:
m.getSegmentStatus = Mock()
m.getSegmentStatus.return_value = 'c'
mock_segs.append(m)
gparray = Mock()
gparray.getDbList = Mock()
gparray.getDbList.return_value = mock_segs
self.contentid_validator.gparray = gparray
self.contentid_validator.content_id = [1, 2, 3]
self.assertEqual([1, 2, 3], self.contentid_validator._validate_content_id())
def test_parse_content_id(self):
self.contentid_validator.content_id = '1, 2, 3'
self.assertEqual([1, 2, 3], self.contentid_validator._parse_content_id())
def test_parse_content_id_valid_single_content_id(self):
self.contentid_validator.content_id = '-1'
self.assertEqual([-1], self.contentid_validator._parse_content_id())
def test_parse_content_id_invalid_comma_separated_list(self):
self.contentid_validator.content_id = '1, 2, 3,,'
with self.assertRaisesRegexp(Exception, 'Some content ids are not integers:'):
self.contentid_validator._parse_content_id()
def test_parse_content_id_invalid_integers(self):
self.contentid_validator.content_id = '1, 2, a, x,'
with self.assertRaisesRegexp(Exception, 'Some content ids are not integers:'):
self.contentid_validator._parse_content_id()
@patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', return_value=[1, 2, 3])
def test_validate_with_only_content_id(self, mock1):
self.contentid_validator.content_id = '1, 2, 3'
self.contentid_validator.contentid_file = None
self.assertEqual([1, 2, 3], self.contentid_validator.validate())
@patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', side_effect=Exception('ERROR'))
def test_validate_with_only_content_id_with_error(self, mock1):
self.contentid_validator.content_id = '1, 2, 3'
self.contentid_validator.contentid_file = None
with self.assertRaisesRegexp(Exception, 'ERROR'):
self.contentid_validator.validate()
@patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_contentid_file', return_value=[1, 2, 3])
@patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_content_id', return_value=[1, 2, 3])
def test_validate_with_only_content_id_file(self, mock1, mock2):
self.contentid_validator.contentid_file = '/tmp/f1'
self.contentid_validator.content_id = None
self.assertEqual([1, 2, 3], self.contentid_validator.validate())
@patch('gppylib.operations.persistent_rebuild.ValidateContentID._validate_contentid_file', side_effect=Exception('ERROR'))
def test_validate_with_only_content_id_file_with_error(self, mock1):
self.contentid_validator.contentid_file = '/tmp/f1'
self.contentid_validator.content_id = None
with self.assertRaisesRegexp(Exception, 'ERROR'):
self.contentid_validator.validate()
class GetDbIdInfoTestCase(unittest.TestCase):
    """Tests for GetDbIdInfo, which collects per-dbid layout information
    (filespaces, tablespaces, database oids) for the content ids being rebuilt.
    All SQL access (dbconn) and the gparray are mocked."""
    def setUp(self):
        # Fresh collector per test; gparray/content_id are assigned by each test.
        self.dbid_info = GetDbIdInfo(gparray=None, content_id=None)
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[(1000, '2000'), (1001, '2001 2002')])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_filespace_to_tablespace_map(self, mock1, mock2, mock3):
        # Each row is (filespace oid, space-separated tablespace oids) and is
        # split into a list of ints per filespace.
        m = Mock()
        m.getSegmentFilespaces.return_value = {1000: '/tmp/fs1', 1001: '/tmp/fs2'}
        self.assertEqual({1000: [2000], 1001: [2001, 2002]}, self.dbid_info._get_filespace_to_tablespace_map(m))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_filespace_to_tablespace_map_empty_filespaces(self, mock1, mock2, mock3):
        # No filespaces -> empty map.
        m = Mock()
        m.getSegmentFilespaces.return_value = {}
        self.assertEqual({}, self.dbid_info._get_filespace_to_tablespace_map(m))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[(1000, '2000'), (1001, '2001 2002')])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_tablespace_to_dboid_map(self, mock1, mock2, mock3):
        # Same row-splitting behavior for tablespace -> database oid rows.
        ts_oids = [1000, 1001]
        self.assertEqual({1000: [2000], 1001: [2001, 2002]}, self.dbid_info._get_tablespace_to_dboid_map(ts_oids))
    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', return_value=[])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_tablespace_to_dboid_map_empty_tablespaces(self, mock1, mock2, mock3):
        # No tablespaces -> empty map.
        ts_oids = []
        self.assertEqual({}, self.dbid_info._get_tablespace_to_dboid_map(ts_oids))
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_no_matching_content_id(self, mock1, mock2):
        # Requested contents 11/12 don't exist in the cluster -> empty result.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [11, 12]
        expected = []
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={1000: [2000, 2002], 1001: [2001, 2003]})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map',
           return_value={2000: [12345], 2001: [2345, 4567], 2002: [8765, 4634], 2003: [3456]})
    def test_get_info_with_single_matching_content_id(self, mock1, mock2):
        # Only content 1 matches -> exactly one DbIdInfo, carrying the mocked
        # filespace/tablespace/dboid maps.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {1000: [2000, 2002], 1001: [2001, 2003]},
                             {2000: [12345], 2001: [2345, 4567], 2002: [8765, 4634], 2003: [3456]}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_no_filespaces(self, mock1, mock2):
        # Matching content with no filespaces -> empty maps in the DbIdInfo.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_no_tablespaces(self, mock1, mock2):
        # Filespaces exist but the tablespace maps are empty.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_down_segments(self, mock1, mock2):
        # A down segment on an unrelated content (i == 3) doesn't change the result.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'd' if i == 3 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_segment_in_ct(self, mock1, mock2):
        # A change-tracking ('c') segment on an unrelated content is ignored too.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'c' if i == 3 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        gparray.isSegmentDown = Mock()
        gparray.isSegmentDown.return_value = False
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_content_down(self, mock1, mock2):
        # The matching content's own segment (i == 0) is down ('d'); the info is
        # still returned for it.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i + 1
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'd' if i == 3 or i == 0 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            m.isSegmentDown.return_value = False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [1, 10]
        expected = [DbIdInfo(1, 'p', 2, 5001, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_filespace_to_tablespace_map', return_value={})
    @patch('gppylib.operations.persistent_rebuild.GetDbIdInfo._get_tablespace_to_dboid_map', return_value={})
    def test_get_info_with_single_matching_content_id_and_mirror_down(self, mock1, mock2):
        # Mirrors (i >= 3) report isSegmentDown() True; only the primary of
        # content 2 is returned.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = (i + 1) % 3
            m.getSegmentDbId.return_value = i + 2
            m.getSegmentRole.return_value = 'p' if i < 3 else 'm'
            m.getSegmentStatus.return_value = 'd' if i >= 3 else 'u'
            m.getSegmentHostName.return_value = 'mdw1'
            m.getSegmentPort.return_value = 5001 + i
            m.getSegmentFilespaces.return_value = {1000: '/tmp/f1', 1001: '/tmp/f2'}
            # We want to compare from the content ID
            m.isSegmentDown.return_value = True if i >= 3 else False
            mock_segs.append(m)
        gparray = Mock()
        gparray.getDbList = Mock()
        gparray.getDbList.return_value = mock_segs
        self.dbid_info.gparray = gparray
        self.dbid_info.content_id = [2,10]
        expected = [DbIdInfo(2, 'p', 3, 5002, 'mdw1', {1000: '/tmp/f1', 1001: '/tmp/f2'}, {}, {}, False)]
        self.assertEqual(expected, self.dbid_info.get_info())
class BackupPersistentTableFilesTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # create persistent table files under new filespace/tablespace/database,
        # and also the default filespace, tablespace/database
        # timestamp: 20140604101010
        #
        # Layout: /tmp/p1 is a user filespace (tablespace 2000, db 123);
        # /tmp/p2 is the default data dir (base/<dboid>, global, xlog/clog/
        # distributedlog).  A parallel pt_rebuild_bk_<timestamp> tree holds the
        # pre-made "backup" copies used by the restore tests.
        # 5090-5093 are global PT relfilenodes; 5094/5095 per-database ones,
        # with 5094.1 as an extension segment file.
        try:
            # source files
            os.makedirs(os.path.join('/tmp/p1', '2000', '123'))
            os.makedirs(os.path.join('/tmp/p2', 'base', '234'))
            os.makedirs(os.path.join('/tmp/p2', 'global'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_xlog'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_clog'))
            os.makedirs(os.path.join('/tmp/p2', 'pg_distributedlog'))
            os.makedirs(os.path.join('/tmp/p1', 'empty'))
            open('/tmp/p1/2000/123/5094', 'w').close()
            open('/tmp/p1/2000/123/5094.1', 'w').close()
            open('/tmp/p1/2000/123/5095', 'w').close()
            open('/tmp/p2/base/234/5094', 'w').close()
            open('/tmp/p2/base/234/5095', 'w').close()
            open('/tmp/p2/global/pg_control', 'w').close()
            open('/tmp/p2/global/5090', 'w').close()
            open('/tmp/p2/global/5091', 'w').close()
            open('/tmp/p2/global/5092', 'w').close()
            open('/tmp/p2/global/5093', 'w').close()
            open('/tmp/p2/pg_xlog/0000', 'w').close()
            open('/tmp/p2/pg_clog/0000', 'w').close()
            open('/tmp/p2/pg_distributedlog/000', 'w').close()
            # Backup files
            os.makedirs(os.path.join('/tmp/p1', 'pt_rebuild_bk_20140604101010','2000', '123'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'base', '234'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'global'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_xlog'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_clog'))
            os.makedirs(os.path.join('/tmp/p2', 'pt_rebuild_bk_20140604101010', 'pg_distributedlog'))
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094', 'w').close()
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094.1', 'w').close()
            open('/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5095', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/base/234/5094', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/base/234/5095', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/pg_control', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5090', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5091', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5092', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/global/5093', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_xlog/0000', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_clog/0000', 'w').close()
            open('/tmp/p2/pt_rebuild_bk_20140604101010/pg_distributedlog/000', 'w').close()
        except OSError:
            # NOTE(review): swallowing OSError means a partially-built fixture
            # (e.g. leftovers from a previous run) goes unnoticed - confirm
            # this is intentional.
            pass
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree('/tmp/p1')
shutil.rmtree('/tmp/p2')
except Exception:
pass
    def setUp(self):
        # Fresh BackupPersistentTableFiles per test, pre-loaded with the
        # per-db and global PT relfilenode maps for dbids 2 and 3, and the
        # fixed fixture timestamp.
        # NOTE(review): the per-db map mixes str and long filenames
        # ('5094' vs 5094L) - confirm downstream code normalizes both.
        self.backup_persistent_files = BackupPersistentTableFiles(dbid_info=None,
                                                                  perdb_pt_filenames={2:{17088L:['5094', '5095'],1L: [5094L, 5095L]},
                                                                                      3:{17088L:['5094', '5095'],1L: [5094L, 5095L]}},
                                                                  global_pt_filenames={2: ['5090', '5091', '5092', '5093'],
                                                                                       3: ['5090', '5091', '5092', '5093']},
                                                                  timestamp='20140604101010')
    @patch('os.makedirs')
    def test_copy_files(self, mock1):
        # Backup direction: identical md5 maps on both sides -> no exception.
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        content = -1
        actionType = 'backup'
        m = Mock()
        m.validate.return_value = {'/tmp/global/5090': 'abdfe', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'abdfe', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.Command.run')
    def test_copy_files_with_restore(self, mock1, mock2):
        # Restore direction: Command.run is mocked so no shell commands execute.
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'restore'
        m.validate.return_value = {'/tmp/global/5090': 'abdfe', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'abdfe', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    def test_copy_files_without_errors_with_no_files(self, mock1):
        # Empty file lists: both md5 validations return empty maps -> no-op.
        src_ptfiles = []
        dst_ptfiles = []
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'backup'
        m.validate.side_effect = [{}, {}]
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.Command.run')
    def test_copy_files_without_errors_with_no_files_with_restore(self, mock1, mock2):
        # Empty file lists on the restore path are also a clean no-op.
        src_ptfiles = []
        dst_ptfiles = []
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'restore'
        m.validate.side_effect = [{}, {}]
        self.backup_persistent_files.md5_validator = m
        self.backup_persistent_files.restore=True
        self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    def test_copy_files_with_md5_mismatch(self, mock1):
        # 5090's source and destination md5 sums differ -> exception reporting
        # only the mismatched pair.
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        self.backup_persistent_files.pool = Mock()
        m = Mock()
        content = -1
        actionType = 'backup'
        m.validate.return_value = {'/tmp/global/5090': 'asdfads', '/tmp/global/5091': 'abdfe',
                                   '/tmp1/global/5090': 'asdfadsf', '/tmp1/global/5091': 'abdfe'}
        self.backup_persistent_files.md5_validator = m
        with self.assertRaisesRegexp(Exception, 'MD5 sums do not match! Expected md5 = "{\'/tmp/global/5090\': \'asdfads\'}",\
                                    but actual md5 = "{\'/tmp1/global/5090\': \'asdfadsf\'}"'):
            self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
    @patch('os.makedirs')
    @patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.validate', return_value={'5090': 'sdfadsf', '5091': 'sdfadsf'})
    def test_copy_files_with_errors(self, mock1, mock2):
        # The worker pool's check_results raises ExecutionError -> it propagates.
        src_ptfiles = ['/tmp/global/5090', '/tmp/global/5091']
        dst_ptfiles = ['/tmp1/global/5090', '/tmp1/global/5091']
        m = Mock()
        content = -1
        actionType = 'backup'
        m.check_results.side_effect = ExecutionError('Error !!!', Mock())
        self.backup_persistent_files.pool = m
        m.validate.return_value = {'5090': 'sdfadsf', '5091': 'sdfadsf'}
        self.backup_persistent_files.md5_validator = m
        with self.assertRaisesRegexp(ExecutionError, 'Error !!!'):
            self.backup_persistent_files._copy_files(src_ptfiles, dst_ptfiles, content, actionType)
def test_build_PT_src_dest_pairs_filelist_None(self):
src_dir = ''
dest_dir = ''
file_list = None
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_filelist_Empty(self):
src_dir = ''
dest_dir = ''
file_list = []
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_non_exist_src_dir(self):
src_dir = 'tmp'
dest_dir = '/tmp'
file_list = ['5090']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_empty_src_dir(self):
src_dir = '/tmp/p1/empty'
dest_dir = '/tmp/p1/empty'
file_list = ['5090']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_with_file_missed(self):
src_dir = '/tmp/p1/'
dest_dir = '/tmp/p1/'
file_list = ['5555']
self.assertEqual((None, None), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
def test_build_PT_src_dest_pairs_with_extended_file_exist(self):
src_dir = '/tmp/p1/2000/123'
dest_dir = '/tmp/p1/pt_rebuild_bk_20140604101010/2000/123'
file_list = ['5094']
src_files = ['/tmp/p1/2000/123/5094', '/tmp/p1/2000/123/5094.1']
dest_files = ['/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094', '/tmp/p1/pt_rebuild_bk_20140604101010/2000/123/5094.1']
self.assertEqual((src_files, dest_files), self.backup_persistent_files.build_PT_src_dest_pairs(src_dir, dest_dir, file_list))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files(self, mock1):
        # Happy path: _copy_files mocked -> backup of global PT files succeeds.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_global_pt_files_with_restore_with_failure(self, mock1):
        # build_PT_src_dest_pairs finds nothing -> restore fails with "missing files".
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing global persistent files from source directory.'):
            self.backup_persistent_files._copy_global_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_global_pt_files_without_restore_with_failure(self, mock1):
        # Same missing-files failure on the backup (non-restore) direction.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing global persistent files from source directory.'):
            self.backup_persistent_files._copy_global_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Exception('Error while backing up files')])
    def test_copy_global_pt_files_with_errors(self, mock1):
        # Second dbid's copy raises -> wrapped as a global backup failure.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Backup of global persistent files failed'):
            self.backup_persistent_files._copy_global_pt_files()
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files_without_errors(self, mock1):
        # Backup direction succeeds when every copy succeeds.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_global_pt_files_with_restore_without_errors(self, mock1):
        # Restore direction succeeds when every copy succeeds.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_global_pt_files(restore=True))
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
           side_effect=[Mock(), Exception('Error while backing up files')])
    def test_copy_global_pt_files_with_restore_with_errors(self, mock1):
        # A copy failure during restore is wrapped as a restore failure.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Restore of global persistent files failed'):
            self.backup_persistent_files._copy_global_pt_files(restore=True)
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
    def test_copy_per_db_pt_files(self, mock1):
        # Happy path for the per-database PT file backup.
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
    @patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
    def test_copy_per_db_pt_files_with_restore_with_failure(self, mock1):
        # No pairs found -> restore fails with "missing per-database files".
        d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.backup_persistent_files.dbid_info = [d1, d2]
        with self.assertRaisesRegexp(Exception, 'Missing per-database persistent files from source directory.'):
            self.backup_persistent_files._copy_per_db_pt_files(restore=True)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_PT_src_dest_pairs', return_value=[None, None])
def test_copy_per_db_pt_files_without_restore_with_failure(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Missing per-database persistent files from source directory.'):
self.backup_persistent_files._copy_per_db_pt_files()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
side_effect=[Mock(), Exception('Error while backing up files')])
def test_copy_per_db_pt_files_with_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Backup of per database persistent files failed'):
self.backup_persistent_files._copy_per_db_pt_files()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_per_db_pt_files_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_per_db_pt_files_with_unused_filespace(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_per_db_pt_files_with_unused_tablespace(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_per_db_pt_files_with_restore_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_per_db_pt_files(restore=True))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_Xactlog_files_without_restore_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_Xactlog_files())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_Xactlog_src_dest_pairs', return_value=[[],[]])
def test_copy_Xactlog_files_without_restore_with_failure(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'should not be empty'):
self.backup_persistent_files._copy_Xactlog_files()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles.build_Xactlog_src_dest_pairs', return_value=[[],[]])
def test_copy_Xactlog_files_with_restore_with_failure(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'should not be empty'):
self.backup_persistent_files._copy_Xactlog_files(restore=True)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_Xactlog_files_with_restore_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_Xactlog_files(restore=True))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_pg_control_files_without_restore_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_pg_control_file())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
def test_copy_pg_control_files_with_restore_without_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files._copy_pg_control_file(restore=True))
@patch('os.path.isfile', return_value=False)
def test_copy_pg_control_files_without_restore_with_failure(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Global pg_control file is missing from source directory'):
self.backup_persistent_files._copy_pg_control_file()
@patch('os.path.isfile', return_value=False)
def test_copy_pg_control_files_with_restore_with_failure(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Global pg_control file is missing from backup directory'):
self.backup_persistent_files._copy_pg_control_file(restore=True)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files',
side_effect=[Mock(), Mock(), Mock(), Exception('Error while backing up files')])
def test_copy_per_db_pt_files_with_restore_with_errors(self, mock1):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Restore of per database persistent files failed'):
self.backup_persistent_files._copy_per_db_pt_files(restore=True)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_files')
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_without_errors(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7, mock8):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
self.assertEqual(None, self.backup_persistent_files.restore())
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_global_file_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_per_db_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_xlog_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_pg_control_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', return_value=None)
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_global_and_per_db_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_global_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_per_db_pt_files', return_value=None)
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_Xactlog_files', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.BackupPersistentTableFiles._copy_pg_control_file', side_effect=Exception('Error'))
@patch('gppylib.operations.persistent_rebuild.WorkerPool')
@patch('gppylib.operations.persistent_rebuild.Command')
@patch('gppylib.operations.persistent_rebuild.ValidateMD5Sum.init')
def test_restore_with_xlog_and_pg_control_bkup_error(self, mock1, mock2, mock3, mock4, mock5, mock6, mock7):
d1 = DbIdInfo(1, 'p', 2, 5001, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
d2 = DbIdInfo(2, 'p', 3, 5002, 'h2', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
self.backup_persistent_files.dbid_info = [d1, d2]
with self.assertRaisesRegexp(Exception, 'Error'):
self.backup_persistent_files.restore()
class RebuildTableTestCase(unittest.TestCase):
    """Unit tests for RebuildTable: dbid selection from a gparray and the rebuild driver.

    Fixes/improvements over the previous version:
      * uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias;
      * the five nearly-identical mock-gparray construction loops are
        deduplicated into ``_install_mock_gparray``.
    """

    def setUp(self):
        self.rebuild_table = RebuildTable(dbid_info=None)

    @staticmethod
    def _make_dbid_info():
        """Return the two standard primary DbIdInfo fixtures on hosts h1/h2."""
        return [DbIdInfo(i + 1, 'p', i + 2, 5001 + i, 'h%d' % (i + 1),
                         {1000: '/tmp/p1', 3052: '/tmp/p2'},
                         {1000: [2000], 3052: [2001]},
                         {2000: [123], 2001: [234]}, False)
                for i in range(2)]

    def _install_mock_gparray(self, roles=('p', 'p'), statuses=('u', 'u'), modes=None):
        """Attach a mock gparray of two segments (content ids 1 and 2, dbids 0 and 1).

        ``modes`` is only configured when given, matching the original tests,
        which set getSegmentMode only for the resync scenario.
        """
        mock_segs = []
        for i in range(2):
            seg = Mock()
            seg.getSegmentContentId.return_value = i + 1
            seg.getSegmentRole.return_value = roles[i]
            seg.getSegmentDbId.return_value = i
            seg.getSegmentPort.return_value = 5000 + i
            seg.getSegmentHostName.return_value = 'mdw%d' % (i + 1)
            seg.getSegmentStatus.return_value = statuses[i]
            if modes is not None:
                seg.getSegmentMode.return_value = modes[i]
            mock_segs.append(seg)
        gparray = Mock()
        gparray.getDbList.return_value = mock_segs
        self.rebuild_table.gparray = gparray

    def test_initializer_captures_values(self):
        """The constructor stores its keyword arguments verbatim."""
        rebuild = RebuildTable(dbid_info="abcd", has_mirrors="efg", batch_size=123, backup_dir=456)
        self.assertEqual("abcd", rebuild.dbid_info)
        self.assertEqual("efg", rebuild.has_mirrors)
        self.assertEqual(123, rebuild.batch_size)
        self.assertEqual(456, rebuild.backup_dir)

    def test_get_valid_dbids(self):
        """Both up primaries matching the requested content ids are selected."""
        self._install_mock_gparray()
        self.assertEqual([0, 1], self.rebuild_table._get_valid_dbids([1, 2]))

    def test_get_valid_dbids_empty_contents(self):
        """No content ids requested -> no dbids selected."""
        self._install_mock_gparray()
        self.assertEqual([], self.rebuild_table._get_valid_dbids([]))

    def test_get_valid_dbids_non_matching_content_ids(self):
        """Content ids not present in the gparray yield an empty selection."""
        self._install_mock_gparray()
        self.assertEqual([], self.rebuild_table._get_valid_dbids([3, 4, 5]))

    def test_get_valid_dbids_content_ids_down(self):
        """A down segment aborts dbid selection."""
        self._install_mock_gparray(statuses=('d', 'u'))
        with self.assertRaisesRegexp(Exception, 'Segment .* is down. Cannot continue with persistent table rebuild'):
            self.rebuild_table._get_valid_dbids([1, 2, 3])

    def test_get_valid_dbids_content_ids_resync(self):
        """A segment in resync mode aborts dbid selection."""
        self._install_mock_gparray(modes=('s', 'r'))
        with self.assertRaisesRegexp(Exception, 'Segment .* is in resync. Cannot continue with persistent table rebuild'):
            self.rebuild_table._get_valid_dbids([1, 2, 3])

    @patch('gppylib.operations.persistent_rebuild.ValidatePersistentBackup.validate_backups', return_value=Mock())
    def test_get_valid_dbids_content_ids_are_mirrors(self, mock_validate):
        """Mirror segments are skipped; only the primary's dbid is selected."""
        self._install_mock_gparray(roles=('m', 'p'))
        self.assertEqual([1], self.rebuild_table._get_valid_dbids([1, 2, 3]))

    @patch('gppylib.operations.persistent_rebuild.GpArray')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._validate_backups')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._get_valid_dbids', return_value=[1, 2, 3])
    @patch('gppylib.operations.persistent_rebuild.ParallelOperation.run')
    def test_rebuild(self, mock_run, mock_get_dbids, mock_validate, mock_gparray):
        """rebuild() reports every DbIdInfo as a success when no operation fails."""
        dbid_info = self._make_dbid_info()
        self.rebuild_table.dbid_info = dbid_info
        self.assertEqual((dbid_info, []), self.rebuild_table.rebuild())

    @patch('gppylib.operations.persistent_rebuild.GpArray')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._validate_backups')
    @patch('gppylib.operations.persistent_rebuild.RebuildTable._get_valid_dbids', return_value=[1, 2, 3])
    @patch('gppylib.operations.persistent_rebuild.ParallelOperation.run')
    @patch('gppylib.operations.persistent_rebuild.RemoteOperation.get_ret', side_effect=[Mock(), Exception('Error')])
    def test_rebuild_with_errors(self, mock_get_ret, mock_run, mock_get_dbids, mock_validate, mock_gparray):
        """The second remote operation raising puts its DbIdInfo in the failure list."""
        d1, d2 = self._make_dbid_info()
        self.rebuild_table.dbid_info = [d1, d2]
        self.assertEqual(([d1], [(d2, 'Error')]), self.rebuild_table.rebuild())
class ValidatePersistentBackupTestCase(unittest.TestCase):
    """Unit tests for ValidatePersistentBackup result processing and validation."""

    def setUp(self):
        self.validate_persistent_backup = ValidatePersistentBackup(dbid_info=None, timestamp='20140605101010')

    @staticmethod
    def _dbid_info(content, dbid, port, host):
        """One standard primary DbIdInfo fixture."""
        return DbIdInfo(content, 'p', dbid, port, host,
                        {1000: '/tmp/p1', 3052: '/tmp/p2'},
                        {1000: [2000], 3052: [2001]},
                        {2000: [123], 2001: [234]}, False)

    @staticmethod
    def _completed_pool(results):
        """Mock worker pool whose completed items yield the given (rc, stdout, cmdStr) triples."""
        items = []
        for rc, stdout, cmd_str in results:
            item = Mock()
            item.get_results.return_value = CommandResult(rc, stdout, '', True, False)
            item.cmdStr = cmd_str
            items.append(item)
        pool = Mock()
        pool.getCompletedItems.return_value = items
        return pool

    def test_process_results(self):
        """Two successful 'find' results validate cleanly."""
        info = self._dbid_info(1, 2, 5001, 'h1')
        pool = self._completed_pool([(0, '/tmp/f1', 'find /tmp/f1 -name pt_rebuild_bk_'),
                                     (0, '/tmp/f1', 'find /tmp/f1 -name pt_rebuild_bk_')])
        self.validate_persistent_backup._process_results(info, pool)

    def test_process_results_with_errors(self):
        """A non-zero return code from 'find' fails validation."""
        info = self._dbid_info(1, 2, 5001, 'h1')
        pool = self._completed_pool([(0, '/tmp/f1', 'find /tmp/f1 -name pt_rebuild_bk_'),
                                     (1, '/tmp/f1', 'find /tmp/f1 -name pt_rebuild_bk_')])
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup._process_results(info, pool)

    def test_process_results_with_missing_backup(self):
        """An empty 'find' result (no backup found) fails validation."""
        info = self._dbid_info(1, 2, 5001, 'h1')
        pool = self._completed_pool([(0, '/tmp/f1', 'find /tmp/f1 -name pt_rebuild_bk_'),
                                     (0, '', 'find /foo/bar -name pt_rebuild_bk_')])
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup._process_results(info, pool)

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    def test_validate(self, mock_pool):
        """validate_backups() runs without error against a mocked worker pool."""
        self.validate_persistent_backup.dbid_info = [self._dbid_info(1, 2, 5001, 'h1'),
                                                     self._dbid_info(2, 3, 5002, 'h2')]
        self.validate_persistent_backup.validate_backups()

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.ValidatePersistentBackup._process_results', side_effect=Exception('Failed to validate backups'))
    def test_validate_error_in_workerpool(self, mock_process, mock_pool):
        """A _process_results failure propagates out of validate_backups()."""
        self.validate_persistent_backup.dbid_info = [self._dbid_info(1, 2, 5001, 'h1'),
                                                     self._dbid_info(2, 3, 5002, 'h2')]
        with self.assertRaisesRegexp(Exception, 'Failed to validate backups'):
            self.validate_persistent_backup.validate_backups()
class RunBackupRestoreTestCase(unittest.TestCase):
    """Unit tests for RunBackupRestore: host mapping and backup/restore dispatch."""

    def setUp(self):
        self.run_backup_restore = RunBackupRestore(dbid_info=None, timestamp=None)

    @staticmethod
    def _dbid_infos(hosts=('h1', 'h2')):
        """Two standard primary DbIdInfo fixtures, one per entry in ``hosts``."""
        return [DbIdInfo(i + 1, 'p', i + 2, 5001 + i, host,
                         {1000: '/tmp/p1', 3052: '/tmp/p2'},
                         {1000: [2000], 3052: [2001]},
                         {2000: [123], 2001: [234]}, False)
                for i, host in enumerate(hosts)]

    def _host_map(self):
        """Standard host->DbIdInfo map with one segment per host."""
        d1, d2 = self._dbid_infos()
        return {'h1': [d1], 'h2': [d2]}

    @staticmethod
    def _completed_pool(results):
        """Mock worker pool whose completed items yield the given (rc, stdout) pairs."""
        items = []
        for rc, stdout in results:
            item = Mock()
            item.get_results.return_value = CommandResult(rc, stdout, '', True, False)
            items.append(item)
        pool = Mock()
        pool.getCompletedItems.return_value = items
        return pool

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore(self, mock_process, mock_pool):
        """Backup dispatch completes when result processing is mocked."""
        self.run_backup_restore._run_backup_restore(self._host_map())

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors(self, mock_process, mock_pool):
        """A result-processing failure propagates during backup dispatch."""
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(self._host_map())

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore_with_restore(self, mock_process, mock_pool):
        """Restore dispatch completes when result processing is mocked."""
        self.run_backup_restore._run_backup_restore(self._host_map(), restore=True)

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors_with_restore(self, mock_process, mock_pool):
        """A result-processing failure propagates during restore dispatch."""
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(self._host_map(), restore=True)

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results')
    def test_run_backup_restore_with_validate(self, mock_process, mock_pool):
        """Validation dispatch completes when result processing is mocked."""
        self.run_backup_restore._run_backup_restore(self._host_map(), validate_backups=True)

    @patch('gppylib.operations.persistent_rebuild.WorkerPool')
    @patch('gppylib.operations.persistent_rebuild.RunBackupRestore._process_results', side_effect=Exception('ERROR'))
    def test_run_backup_restore_with_errors_with_validate(self, mock_process, mock_pool):
        """A result-processing failure propagates during validation dispatch."""
        with self.assertRaisesRegexp(Exception, 'ERROR'):
            self.run_backup_restore._run_backup_restore(self._host_map(), validate_backups=True)

    def test_get_host_to_dbid_info_map(self):
        """Segments on different hosts map to separate host entries."""
        d1, d2 = self._dbid_infos()
        self.run_backup_restore.dbid_info = [d1, d2]
        self.assertEqual({'h1': [d1], 'h2': [d2]}, self.run_backup_restore._get_host_to_dbid_info_map())

    def test_get_host_to_dbid_info_map_empty(self):
        """No dbid info -> empty host map."""
        self.run_backup_restore.dbid_info = []
        self.assertEqual({}, self.run_backup_restore._get_host_to_dbid_info_map())

    def test_get_host_to_dbid_info_map_multiple_entries_per_host(self):
        """Segments sharing a host are grouped under a single host entry."""
        d1, d2 = self._dbid_infos(hosts=('h1', 'h1'))
        self.run_backup_restore.dbid_info = [d1, d2]
        self.assertEqual({'h1': [d1, d2]}, self.run_backup_restore._get_host_to_dbid_info_map())

    def test_process_results(self):
        """Two zero-return-code results process without error."""
        pool = self._completed_pool([(0, '/tmp/f1'), (0, '/tmp/f1')])
        self.run_backup_restore._process_results(pool, 'ERR')

    def test_process_results_with_errors(self):
        """A non-zero return code raises the supplied error message."""
        pool = self._completed_pool([(0, '/tmp/f1'), (1, 'ERR')])
        with self.assertRaisesRegexp(Exception, 'ERR'):
            self.run_backup_restore._process_results(pool, 'ERR')
class ValidateMD5SumTestCase(unittest.TestCase):
    """Unit tests for ValidateMD5Sum: selection of the md5 program and
    parsing of md5/md5sum output on OSX (Darwin) and Linux.

    Fixes in this revision:
    - The Linux variant of the results-pattern test duplicated the name
      ``test_get_md5_results_pat_for_osx``; the duplicate silently shadowed
      the OSX test, which therefore never ran. It is now named
      ``test_get_md5_results_pat_for_linux``.
    - Regex pattern literals are raw strings, avoiding the invalid
      ``\(`` escape sequence warning in newer Python versions (the string
      value is unchanged).
    """

    def setUp(self):
        self.validate_md5sum = ValidateMD5Sum(pool=None)

    @patch('platform.system', return_value='Darwin')
    def test_get_md5_prog_for_osx(self, mock1):
        self.assertEqual('md5', self.validate_md5sum._get_md5_prog())

    @patch('platform.system', return_value='Linux')
    def test_get_md5_prog_for_linux(self, mock1):
        self.assertEqual('md5sum', self.validate_md5sum._get_md5_prog())

    @patch('platform.system', return_value='Solaris')
    def test_get_md5_prog_for_invalid_os(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Cannot determine the md5 program since Solaris platform is not supported'):
            self.validate_md5sum._get_md5_prog()

    @patch('platform.system', return_value='Darwin')
    def test_get_md5_results_pat_for_osx(self, mock1):
        # NOTE(review): comparing compiled patterns relies on re's internal
        # pattern cache returning the same object for equal pattern strings.
        pat = re.compile(r'MD5 \((.*)\) = (.*)')
        self.assertEqual(pat, self.validate_md5sum._get_md5_results_pat())

    @patch('platform.system', return_value='Linux')
    def test_get_md5_results_pat_for_linux(self, mock1):
        # Renamed from test_get_md5_results_pat_for_osx (duplicate name bug).
        pat = re.compile('(.*) (.*)')
        self.assertEqual(pat, self.validate_md5sum._get_md5_results_pat())

    @patch('platform.system', return_value='Solaris')
    def test_get_md5_results_pat_for_invalid_os(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Cannot determine the pattern for results of md5 program since Solaris platform is not supported'):
            self.validate_md5sum._get_md5_results_pat()

    @patch('platform.system', return_value='Darwin')
    def test_process_results_on_osx(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'MD5 (foo) = afsdfasdf', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, 'MD5 (foo1) = sdfadsff', '', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile(r'MD5 \((.*)\) = (.*)')
        expected = {'foo': 'afsdfasdf', 'foo1': 'sdfadsff'}
        self.assertEqual(expected, self.validate_md5sum._process_md5_results())

    @patch('platform.system', return_value='Darwin')
    def test_process_results_on_osx_with_error(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'MD5 (foo1) = sdfadsff', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(1, '', 'Error', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile(r'MD5 \((.*)\) = (.*)')
        with self.assertRaisesRegexp(Exception, 'Unable to calculate md5sum'):
            self.validate_md5sum._process_md5_results()

    @patch('platform.system', return_value='Linux')
    def test_process_results_on_linux(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'afsdfasdf foo', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(0, 'sdfadsff foo1', '', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile('(.*) (.*)')
        expected = {'foo': 'afsdfasdf', 'foo1': 'sdfadsff'}
        self.assertEqual(expected, self.validate_md5sum._process_md5_results())

    @patch('platform.system', return_value='Linux')
    def test_process_results_on_linux_with_error(self, mock1):
        m = Mock()
        m1 = Mock()
        m1.get_results.return_value = CommandResult(0, 'sdfadsff fo1', '', True, False)
        m2 = Mock()
        m2.get_results.return_value = CommandResult(1, '', 'Error', True, False)
        m.getCompletedItems.return_value = [m1, m2]
        self.validate_md5sum.pool = m
        self.validate_md5sum.md5_results_pat = re.compile('(.*) (.*)')
        with self.assertRaisesRegexp(Exception, 'Unable to calculate md5sum'):
            self.validate_md5sum._process_md5_results()
class RebuildPersistentTableTestCase(unittest.TestCase):
    """Unit tests for RebuildPersistentTables: platform/version checks,
    mirror/standby detection and persistent-table relfilenode discovery.

    Fix in this revision: removed two dead locals in
    ``test_get_persistent_table_filenames`` — an ``expected_global``
    defaultdict that was immediately overwritten, and an
    ``expected_perdb_pt_files`` defaultdict that was never read (note the
    trailing 's': the variable actually asserted on is
    ``expected_perdb_pt_file``).
    """

    def setUp(self):
        self.rebuild_persistent_table = RebuildPersistentTables(content_id=None,
                                                                contentid_file=None,
                                                                backup=None,
                                                                restore=None,
                                                                batch_size=None,
                                                                backup_dir=None)

    @patch('gppylib.operations.persistent_rebuild.platform.system', return_value='Linux')
    def test_check_platform_linux(self, mock1):
        self.rebuild_persistent_table._check_platform()

    @patch('gppylib.operations.persistent_rebuild.platform.system', return_value='Solaris')
    def test_check_platform_non_linux(self, mock1):
        with self.assertRaisesRegexp(Exception, 'This tool is only supported on Linux and OSX platforms'):
            self.rebuild_persistent_table._check_platform()

    def test_validate_has_mirrors_and_standby(self):
        # Six segments with content ids -1..4; the first three report as mirrors.
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            m.isSegmentMirror.return_value = True if i < 3 else False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertTrue(self.rebuild_persistent_table.has_mirrors)

    def test_validate_has_mirrors_and_standby_with_no_mirrors(self):
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            m.isSegmentMirror.return_value = False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertFalse(self.rebuild_persistent_table.has_mirrors)

    def test_validate_has_mirrors_and_standby_with_mirrors_for_master(self):
        mock_segs = []
        for i in range(6):
            m = Mock()
            m.getSegmentContentId.return_value = i - 1
            # NOTE(review): `i == -1` is never true since i ranges 0..5; the
            # intent looks like `i - 1 == -1` (the master) — confirm against
            # _validate_has_mirrors_and_standby before changing, since the
            # assertion below currently passes as-is.
            m.isSegmentMirror.return_value = True if i == -1 else False
            mock_segs.append(m)
        m = Mock()
        m.getDbList.return_value = mock_segs
        self.rebuild_persistent_table.gparray = m
        self.rebuild_persistent_table._validate_has_mirrors_and_standby()
        self.assertTrue(self.rebuild_persistent_table.has_standby)

    @patch('gppylib.operations.persistent_rebuild.findCmdInPath', return_value=True)
    def test_check_md5_prog(self, mock1):
        self.rebuild_persistent_table._check_md5_prog()

    @patch('gppylib.operations.persistent_rebuild.findCmdInPath', return_value=False)
    def test_check_md5_prog_no_md5(self, mock1):
        with self.assertRaisesRegexp(Exception, 'Unable to find md5.* program. Please make sure it is in PATH'):
            self.rebuild_persistent_table._check_md5_prog()

    @patch('gppylib.operations.persistent_rebuild.GpVersion.local', return_value=GpVersion('4.2.7.3'))
    def test_check_database_version(self, mock1):
        self.rebuild_persistent_table._check_database_version()

    @patch('gppylib.operations.persistent_rebuild.GpVersion.local', return_value=GpVersion('4.0.1.0'))
    def test_check_database_version_with_lower_version(self, mock1):
        with self.assertRaisesRegexp(Exception, 'This tool is not supported on Greenplum version lower than 4.1.0.0'):
            self.rebuild_persistent_table._check_database_version()

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=[[[5090], [5091], [5092], [5093]], [[123, 'template1']],
                                                                                [[5094], [16992]]])
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames(self, mock1, mock2, mock3):
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        self.rebuild_persistent_table.dbid_info = [d1]
        self.rebuild_persistent_table._get_persistent_table_filenames()
        # Expected global persistent files, keyed host -> dbid -> relfilenodes.
        expected_files = ['5090', '5091', '5092', '5093']
        expected_dbid = {3: expected_files}
        expected_global = {'h1': expected_dbid}
        # Expected per-database files, keyed host -> dbid -> database oid.
        exp_pt_files = ['5094', '16992']
        exp_dboid = {123: exp_pt_files}
        exp_dbid = {3: exp_dboid}
        expected_perdb_pt_file = {'h1': exp_dbid}
        from gppylib.operations.persistent_rebuild import GLOBAL_PERSISTENT_FILES, PER_DATABASE_PERSISTENT_FILES
        self.assertEqual(GLOBAL_PERSISTENT_FILES, expected_global)
        self.assertEqual(PER_DATABASE_PERSISTENT_FILES, expected_perdb_pt_file)

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=pt_query_side_effect)
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames_lacking_global_relfilenode(self, mock1, mock2, mock3):
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        # Module-level flag presumably consumed by pt_query_side_effect
        # (defined earlier in this file) to drop the global PT entry.
        global remove_global_pt_entry
        remove_global_pt_entry = True
        self.rebuild_persistent_table.dbid_info = [d1]
        # 'pesistent' typo appears to mirror the production error string — do
        # not "fix" it here without changing the source message too.
        with self.assertRaisesRegexp(Exception, 'Missing relfilenode entry of global pesistent tables in pg_class'):
            self.rebuild_persistent_table._get_persistent_table_filenames()
        remove_global_pt_entry = False

    @patch('gppylib.operations.persistent_rebuild.dbconn.execSQL', side_effect=pt_query_side_effect)
    @patch('gppylib.operations.persistent_rebuild.dbconn.connect')
    @patch('gppylib.operations.persistent_rebuild.dbconn.DbURL')
    def test_get_persistent_table_filenames_lacking_per_database_relfilenode(self, mock1, mock2, mock3):
        d1 = DbIdInfo(2, 'p', 3, 5002, 'h1', {1000: '/tmp/p1', 3052: '/tmp/p2'}, {1000: [2000], 3052: [2001]}, {2000: [123], 2001: [234]}, False)
        global remove_per_db_pt_entry
        remove_per_db_pt_entry = True
        self.rebuild_persistent_table.dbid_info = [d1]
        with self.assertRaisesRegexp(Exception, 'Missing relfilenode entry of per database persistent tables in pg_class'):
            self.rebuild_persistent_table._get_persistent_table_filenames()
        remove_per_db_pt_entry = False
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Quikling/gpdb | gpMgmt/bin/gppylib/operations/test/unit/test_unit_persistent_rebuild.py | Python | apache-2.0 | 87,698 |
"""
Course: Statistical Methods in Artificial Intelligence (CSE471)
Semester: Fall '17
Professor: Gandhi, Vineet
Assignment 2: SVM example using scikit-learn
Code to separate 2D and 3D data with SVMs using the scikit-learn
library. The 2D example is deliberately not linearly separable, so
the non-linear kernels can be compared against the linear one.
Examples for different kernels with various parameter combinations
are provided. Visualized using matplotlib.
"""
from sklearn import svm
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold.t_sne import TSNE
# For reproducibility: seeding NumPy's global RNG makes every run
# generate the same "random" training data and shuffles.
np.random.seed(123)
def generate_training_data_2D():
    """
    Generate 2D training data for two classes that are NOT linearly
    separable: class 1 is a 100-point square cluster around the origin,
    class 2 is a 100-point frame surrounding it. The points are shown
    in a matplotlib window (the previous docstring's claims of linear
    separability and of saving a PDF were inaccurate).

    Returns
    -------
    c1, c2: Points belonging to the two classes (100 points each)
    """
    # Class 1: uniform square around the origin.
    c11 = np.random.uniform(-1.50, 1.50, 100)
    c12 = np.random.uniform(-1.50, 1.50, 100)
    # Class 2: four 25-point bands (left, right, bottom, top) forming a frame.
    c2111 = np.random.uniform(-3.50, -2.50, 25)
    c2112 = np.random.uniform(2.50, 3.50, 25)
    c2121 = np.random.uniform(-3.50, 3.50, 25)
    c2122 = np.random.uniform(-3.50, 3.50, 25)
    c2211 = np.random.uniform(-3.50, 3.50, 25)
    c2212 = np.random.uniform(-3.50, 3.50, 25)
    c2221 = np.random.uniform(-3.50, -2.50, 25)
    c2222 = np.random.uniform(2.50, 3.50, 25)
    c1 = np.array([[i, j] for i, j in zip(c11, c12)])
    c211 = np.array([[i, j] for i, j in zip(c2111, c2211)])
    c212 = np.array([[i, j] for i, j in zip(c2112, c2212)])
    c221 = np.array([[i, j] for i, j in zip(c2121, c2221)])
    c222 = np.array([[i, j] for i, j in zip(c2122, c2222)])
    c2 = np.concatenate([c211, c212, c221, c222], axis=0)
    # Fix: the Figure object was bound to an unused local (`points`).
    plt.figure()
    plt.plot(c1[:, 0], c1[:, 1], 'o', c2[:, 0], c2[:, 1], '*')
    plt.show()
    plt.close()
    return c1, c2
def generate_training_data_3D():
    """
    Generate 20 random (x, y, z) points for each of two classes.
    The classes are linearly separable (class 1 has x > 0 and z < 0,
    class 2 has x < 0 and z > 0). The points are displayed in a 3D
    scatter plot; the figure is shown interactively, not saved.

    Returns
    -------
    c1, c2: Points belonging to the two classes
    """
    # Class 1: x in (0.05, 1.5), y in (-1.5, 1.5), z in (-2.5, -0.05).
    c11 = np.random.uniform(0.05, 1.50, 20)
    c12 = np.random.uniform(-1.50, 1.50, 20)
    c13 = np.random.uniform(-2.50, -0.05, 20)
    # Class 2: x in (-1.5, -0.05), y in (-1.5, 1.5), z in (0.05, 2.5).
    c21 = np.random.uniform(-1.50, -0.05, 20)
    c22 = np.random.uniform(-1.50, 1.50, 20)
    c23 = np.random.uniform(0.05, 2.50, 20)
    c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])
    c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])
    points = plt.figure()
    ax = points.add_subplot(111, projection='3d')
    ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')
    ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')
    plt.show()
    plt.close()
    return c1, c2
def create_meshgrid(x, y, h=0.015):
    """
    Build a 2D evaluation grid covering the data range of x and y,
    padded by one unit on every side.

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: grid step size, optional

    Returns
    -------
    xx, yy: ndarray coordinate matrices
    """
    x_ticks = np.arange(x.min() - 1, x.max() + 1, h)
    y_ticks = np.arange(y.min() - 1, y.max() + 1, h)
    grid_x, grid_y = np.meshgrid(x_ticks, y_ticks)
    return grid_x, grid_y
def plot_contours(ax, clf, xx, yy, **params):
    """
    Draw a classifier's decision regions as filled contours.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: fitted classifier exposing predict()
    xx, yy: meshgrid ndarrays (see create_meshgrid)
    params: extra keyword arguments forwarded to contourf, optional

    Returns
    -------
    The contour set returned by ax.contourf.
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
def get_fitted_svm(X, Y):
    """
    Fit one svm.SVC per kernel configuration and return the models with
    human-readable titles.

    C controls regularization: a higher C (range roughly [1.0, 1500.0])
    lowers the misclassification rate at the cost of a more complex
    decision surface, while a lower C prefers a simpler boundary.
    Polynomial kernels vary with degree; RBF and sigmoid kernels vary
    with gamma (margin).

    Returns
    -------
    models, titles: matching tuples of fitted classifiers and plot titles.
    """
    C = 10.0  # SVM Regularization Parameter
    kernel_specs = (
        (dict(kernel='linear'), 'SVC with Linear kernel'),
        (dict(kernel='poly', degree=1), 'SVC with Polynomial kernel (degree = 1)'),
        (dict(kernel='poly', degree=2), 'SVC with Polynomial kernel (degree = 2)'),
        (dict(kernel='poly', degree=3), 'SVC with Polynomial kernel (degree = 3)'),
        (dict(kernel='rbf', gamma=0.5), 'SVC with RBF kernel (gamma = 0.5)'),
        (dict(kernel='rbf', gamma=0.8), 'SVC with RBF kernel (gamma = 0.8)'),
        (dict(kernel='sigmoid', gamma=0.5), 'SVC with Sigmoid kernel (gamma = 0.5)'),
        (dict(kernel='sigmoid', gamma=0.8), 'SVC with Sigmoid kernel (gamma = 0.8)'),
    )
    fitted = []
    titles = []
    for kernel_kwargs, title in kernel_specs:
        classifier = svm.SVC(C=C, **kernel_kwargs)
        classifier.fit(X, Y)
        fitted.append(classifier)
        titles.append(title)
    return tuple(fitted), tuple(titles)
def plot_decision_boundary(X, Y, models, titles):
    """
    Plot the decision boundary of each fitted model in a 2x4 grid of
    subplots, overlaying the training points. This logic may be helpful
    in visualizing your final classifier in the assignment question.

    Parameters
    ----------
    X: (n, 2) array of training points
    Y: length-n array of class labels (used for point colors)
    models, titles: matching sequences of classifiers and subplot titles
    """
    fig, sub = plt.subplots(2, 4, figsize=(20, 8))
    plt.subplots_adjust(wspace=1.0, hspace=0.6)
    # One shared evaluation grid for all subplots.
    xx, yy = create_meshgrid(X[:, 0], X[:, 1])
    for clf, title, ax in zip(models, titles, sub.flatten()):
        plot_contours(ax, clf, xx, yy,
                      cmap=plt.cm.coolwarm, alpha=0.8)
        ax.scatter(X[:,0], X[:,1], c=Y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xlabel('Xvalues')
        ax.set_ylabel('Yvalues')
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(title)
    # Try to maximize the window for the active GUI backend.
    # NOTE(review): get_backend() typically reports names like 'Qt5Agg' or
    # 'TkAgg'; the upper-case "QT" test may never match, and the fallback
    # branch assumes a wx-style manager (manager.frame) — it will raise on
    # non-GUI backends such as Agg. Verify on the intended environment.
    back = matplotlib.get_backend()
    manager = plt.get_current_fig_manager()
    if "QT" in back:
        manager.window.showMaximized()
    elif "Tk" in back:
        manager.resize(*manager.window.maxsize())
    else:
        manager.frame.Maximize(True)
    plt.show()
    plt.close()
def SVM():
    """
    Creates different sklearn.SVC objects using
    the various kernels available for SVM.
    The decision boundaries for various kernels are
    plotted as contours in a matplotlib Figure with
    subplots. This is helpful for visualization.
    However, these boundaries are for 2D data and hence
    should not be considered as Holy Grail for high
    dimensional data. The actual assignment question will
    have much more complex data.
    """
    x1, x2 = generate_training_data_2D()
    # Labels: 0 for the first class, 1 for the second.
    Y = np.concatenate([np.zeros(x1.shape[0], dtype=np.int32),
                        np.ones(x2.shape[0], dtype=np.int32)])
    X = np.concatenate([x1, x2], axis=0)
    # Shuffle X and Y with the same permutation: save the RNG state,
    # shuffle X, restore the state, then shuffle Y.
    rng = np.random.get_state()
    np.random.shuffle(X)
    # Set the random state back to previous to shuffle X & Y similarly
    np.random.set_state(rng)
    np.random.shuffle(Y)
    models, titles = get_fitted_svm(X, Y)
    plot_decision_boundary(X, Y, models, titles)
# Run the full demo when executed as a script.
if __name__ == '__main__':
    SVM()
| dracarys983/SVM | visualization_examples/linearly_inseparable.py | Python | mit | 7,952 |
# bytearray is a mutable sequence type: the same object may be modified
# in place in memory (unlike the immutable bytes type).
# Its contents are binary data (integers in the range 0-255).
ba1 = bytearray()
ba1.append(115)  # append one byte; 115 is the ASCII code for 's'
print(ba1)  # -> bytearray(b's')
| ekazyam/study | パーフェクトPython/pp_078_bytearray.py | Python | mit | 237 |
#!/usr/bin/env python
class Solution:
    def singleNumber(self, nums):
        """Return the element that appears exactly once when every other
        element of nums appears exactly three times.

        Works for negative numbers by shifting everything by the minimum
        value first, then counting set bits modulo 3 across two 16x2-bit
        accumulators (one for even bit positions, one for odd).

        :type nums: List[int]
        :rtype: int
        """
        base = min(nums)  # shift so all counted values are non-negative
        even_mask = int('55555555', base=16)  # selects even bit positions
        odd_mask = int('AAAAAAAA', base=16)   # selects odd bit positions
        count_even = 0
        count_odd = 0
        for value in nums:
            shifted = value - base
            count_even += shifted & even_mask
            count_odd += (shifted & odd_mask) >> 1
            # Reduce each 2-bit counter modulo 3 (a count of 3 resets to 0),
            # so bits set by triples cancel out.
            for pos in range(0, 32, 2):
                if (count_even >> pos) & 3 == 3:
                    count_even ^= 3 << pos
                if (count_odd >> pos) & 3 == 3:
                    count_odd ^= 3 << pos
        # Recombine even/odd bit counts and undo the shift.
        return count_even + (count_odd << 1) + base
# Ad-hoc driver code: demonstrates the bit masks and runs one sample input.
mask1 = int('55555555', base=16)  # even bit positions
mask2 = int('AAAAAAAA', base=16)  # odd bit positions
nums1 = 2
int1 = (nums1 & mask1)
int2 = (nums1 & mask2) >> 1
# Successive sample inputs; only the last assignment is actually used.
nums = [0,1,0,1,0,1,99]
nums = [2,2,3,2]
nums = [2,2,5,5,1,5,1,1,0,2]
nums = [-2,-2,1,1,-3,1,-3,-3,-4,-2]
print(int1)
print(int2)
sol = Solution()
print(sol.singleNumber(nums))  # expected: -4 (the only element appearing once)
| eroicaleo/LearningPython | interview/leet/137_Single_Number_II.py | Python | mit | 1,076 |
#!/usr/bin/env python
from __future__ import division, print_function
import argparse
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import json
import os
import os.path
import re
import sys
import requests
# Name of the Galaxy data table this data manager populates.
DATA_TABLE_NAME = "primer_scheme_bedfiles"
def write_artic_style_bed(input_file, bed_output_filename):
    """Copy a primer BED file, converting it to ARTIC-style scoring.

    The ARTIC pipeline (against the BED spec) requires a non-numeric
    value in column 5. Lines whose score column parses as a number get
    the score rewritten as '_<score>'; lines that are already non-numeric
    are copied through unchanged. Exits with an error message on lines
    with fewer than 6 tab-separated fields.
    """
    with open(bed_output_filename, "w") as out_handle:
        for record in input_file:
            columns = record.split("\t")
            if len(columns) < 6:
                # Too short to carry the strand column required by ARTIC.
                exit("invalid format in BED file: {}".format(record.rstrip()))
            try:
                numeric_score = float(columns[4])
            except ValueError:
                # Already an ARTIC-style (non-numeric) score: keep as-is.
                pass
            else:
                # Regular numeric BED score: prefix it for the ARTIC pipeline.
                columns[4] = '_{0}'.format(numeric_score)
            out_handle.write("\t".join(columns))
def fetch_artic_primers(output_directory, primers):
    """Download the requested ARTIC nCoV-2019 primer scheme BED files.

    Only schemes named in *primers* are fetched. Each downloaded file is
    rewritten via write_artic_style_bed into *output_directory*. Exits
    with the HTTP status code if any download fails.

    Returns a list of dicts (value/path/description) suitable for the
    Galaxy data table.
    """
    scheme_urls = {
        "SARS-CoV-2-ARTICv1": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V1/nCoV-2019.bed",
        "SARS-CoV-2-ARTICv2": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V2/nCoV-2019.bed",
        "SARS-CoV-2-ARTICv3": "https://raw.githubusercontent.com/artic-network/artic-ncov2019/master/primer_schemes/nCoV-2019/V3/nCoV-2019.bed",
    }
    table_rows = []
    for scheme_name, scheme_url in scheme_urls.items():
        if scheme_name not in primers:
            continue
        response = requests.get(scheme_url)
        if response.status_code != 200:
            print(
                "Error: download of",
                scheme_url,
                "failed with code",
                response.status_code,
                file=sys.stderr,
            )
            exit(response.status_code)
        bed_path = os.path.join(output_directory, scheme_name + ".bed")
        write_artic_style_bed(StringIO(response.text), bed_path)
        # e.g. "SARS-CoV-2-ARTIC v3 primer set"
        scheme_description = scheme_name[:-2] + " " + scheme_name[-2:] + " primer set"
        table_rows.append(dict(value=scheme_name, path=bed_path, description=scheme_description))
    return table_rows
def install_primer_file(
    output_directory, input_filename, primer_name, primer_description
):
    """Install a user-supplied primer BED file into *output_directory*.

    The primer name is sanitized (spaces become underscores, remaining
    non-word characters are stripped) and used as both the table value
    and the output file name. The file content is converted via
    write_artic_style_bed.

    Returns a one-element list of dicts for the Galaxy data table.
    """
    safe_name = re.sub(r"\W", "", str(primer_name).replace(" ", "_"))
    destination = os.path.join(output_directory, safe_name + ".bed")
    with open(input_filename) as source:
        write_artic_style_bed(source, destination)
    return [dict(value=safe_name, description=primer_description, path=destination)]
class SplitArgs(argparse.Action):
    """argparse action that splits a comma-separated option value into a list."""

    def __call__(self, parser, namespace, values, option_string=None):
        parts = values.split(",")
        setattr(namespace, self.dest, parts)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Fetch ARTIC SARS-CoV-2 primer files for Galaxy/IRIDA use"
)
parser.add_argument(
"--output_directory", default="tmp", help="Directory to write output to"
)
primer_file = parser.add_argument_group()
primer_file.add_argument(
"--primer_file", help="BED format file containing primer scheme"
)
primer_file.add_argument(
"--primer_name",
help="Name of primer scheme (one word). Required if --primer_file is used",
)
primer_file.add_argument(
"--primer_description",
help="Description of primer scheme. Required if --primer_file is used",
)
artic = parser.add_argument_group()
artic.add_argument(
"--artic_primers",
action=SplitArgs,
help="Comma separated list of primers to fetch",
)
parser.add_argument(
"galaxy_datamanager_filename",
help="Galaxy JSON format file describing data manager inputs",
)
args = parser.parse_args()
if args.artic_primers is None and args.primer_file is None:
print(
"One of --artic_primers or --primer_file + --primer_name + --primer_description is required.",
file=sys.stderr,
)
exit(1)
elif args.primer_file is not None and (
args.primer_name is None or args.primer_description is None
):
print(
"If --primer_file is used --primer_name and --primer_description is also required",
file=sys.stderr,
)
exit(1)
elif args.primer_file is not None and args.artic_primers is not None:
print(
"Only one of --artic_primers or --primer_file + --primer_name + --primer_description can be chosen"
)
exit(1)
with open(args.galaxy_datamanager_filename) as fh:
config = json.load(fh)
output_directory = config.get("output_data", [{}])[0].get("extra_files_path", None)
if output_directory is None:
output_directory = args.output_directory
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
data_manager_dict = {}
data_manager_dict["data_tables"] = config.get("data_tables", {})
data_manager_dict["data_tables"][DATA_TABLE_NAME] = data_manager_dict[
"data_tables"
].get(DATA_TABLE_NAME, [])
if args.artic_primers:
data = fetch_artic_primers(output_directory, args.artic_primers)
else:
data = install_primer_file(
output_directory,
args.primer_file,
args.primer_name,
args.primer_description,
)
data_manager_dict["data_tables"][DATA_TABLE_NAME].extend(data)
with open(args.galaxy_datamanager_filename, "w") as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
| jj-umn/tools-iuc | data_managers/data_manager_primer_scheme_bedfiles/data_manager/install_primer_scheme_bedfiles.py | Python | mit | 5,949 |
from random import randrange
from myhdl import Signal
from myhdl import intbv
from myhdl import traceSignals
from myhdl import Simulation
from myhdl import bin, instances
from myhdl import delay, instance, always, always_seq, always_comb
from myhdl import StopSimulation
from alu import ALU, t_ALU_FUNCTION
PERIOD = 20  # clock period in simulation time units
STEPS = 10
WIDTH = 8    # bit width of the ALU operands


def BIN(k):
    """Format k as a WIDTH-bit binary string (myhdl's two-argument bin).

    Fix: was a lambda assignment (PEP 8 E731); a def keeps the same
    callable interface with a proper name and docstring.
    """
    return bin(k, WIDTH)
def test_alu():
    """Simulate the ALU and check q for every t_ALU_FUNCTION opcode."""
    def alu_test():
        # Build the testbench: clock, flag outputs, operands and result.
        clk = Signal(bool(0))
        # flags
        n = Signal(bool(0))
        z = Signal(bool(0))
        o = Signal(bool(0))
        # a op b => q
        a = Signal(intbv(0)[WIDTH:])
        b = Signal(intbv(0)[WIDTH:])
        q = Signal(intbv(0)[WIDTH:])
        # op
        af = Signal(t_ALU_FUNCTION.ZERO)
        alu_inst = ALU(q, o, z, n, a, b, af, clk, width=WIDTH)
        DELAY = delay(PERIOD // 2)
        @always(DELAY)
        def clkgen():
            # Free-running clock: toggle every half period.
            clk.next = not clk
        @instance
        def monitor():
            # Trace every operation on each falling clock edge.
            while True:
                print "a %s %s b %s => q %s" % (a, af, b, q)
                yield clk.negedge
        @instance
        def stimulus():
            FF = 2 ** WIDTH - 1
            # NOTE(review): F0 is computed but never used below.
            F0 = FF & ~(0xf)
            F00 = FF & ~(0xff)
            def op(x, y, f):
                # Apply operands and opcode, then wait for the next rising edge.
                a.next = x
                b.next = y
                af.next = f
                yield delay(PERIOD // 4)
                yield clk.posedge
            # Each case: apply inputs, wait a quarter period after the clock
            # edge for q to settle, then assert the expected result.
            yield op(0, 0, t_ALU_FUNCTION.ZERO)
            yield delay(PERIOD // 4)
            assert q == 0
            yield op(0, 0, t_ALU_FUNCTION.ONES)
            yield delay(PERIOD // 4)
            assert q == FF, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(2, 2, t_ALU_FUNCTION.A_PLUS_B)
            yield delay(PERIOD // 4)
            assert q == 4
            yield op(1, 0, t_ALU_FUNCTION.NEG_A)
            yield delay(PERIOD // 4)
            assert q == FF
            yield op(42, 17, t_ALU_FUNCTION.A)
            yield delay(PERIOD // 4)
            assert q == 42
            yield op(42, 17, t_ALU_FUNCTION.B)
            yield delay(PERIOD // 4)
            assert q == 17
            yield op(0xaa, 0, t_ALU_FUNCTION.NOT_A)
            yield delay(PERIOD // 4)
            assert q == 0x55 | F00, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(0, 0x55, t_ALU_FUNCTION.NOT_B)
            yield delay(PERIOD // 4)
            assert q == 0xaa | F00, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(5, 0, t_ALU_FUNCTION.A_PLUS_1)
            yield delay(PERIOD // 4)
            assert q == 6, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(0, 8, t_ALU_FUNCTION.B_PLUS_1)
            yield delay(PERIOD // 4)
            assert q == 9, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(0x50, 0x06, t_ALU_FUNCTION.A_OR_B)
            yield delay(PERIOD // 4)
            assert q == 0x56, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(0x04, 0xff, t_ALU_FUNCTION.A_AND_B)
            yield delay(PERIOD // 4)
            assert q == 0x04, "q %d %s %s" % (q, hex(q), BIN(q))
            yield op(0x08, 0xff, t_ALU_FUNCTION.A_XOR_B)
            yield delay(PERIOD // 4)
            assert q == 0xf7, "q %d %s %s" % (q, hex(q), BIN(q))
            yield DELAY
            yield DELAY
            raise StopSimulation
        return instances()
    # Dump a VCD trace of the testbench and run the simulation to the end.
    tb = traceSignals(alu_test)
    sim = Simulation(tb)
    sim.run()
# Run the simulation when executed as a script.
if __name__ == '__main__':
    test_alu()
| seletz/myhdl-test | test_alu.py | Python | mit | 3,465 |
"""
-------------------------------------------------------------------------
AIOpening - modules/__init__.py
Neural Net Modules constructed with deepmind/sonnet
created: 2017/08/31 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
from .fully_connected_nn import FullyConnectedNN
from .convolutional_2d_nn import Convolutional2DNN
from .flatten_layer import FlattenLayer
__all__ = ["FullyConnectedNN", "Convolutional2DNN", "FlattenLayer"]
| ducandu/aiopening | aiopening/modules/__init__.py | Python | mit | 519 |
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015-2016 by Mike Taylor
:license: MIT, see LICENSE for more details.
"""
import os
import sys
import click
# Default configuration file name and per-user application directory.
_cfg_name = 'doukan.cfg'
_cfg_path = click.get_app_dir('doukan')
# Content written when creating an empty configuration file.
_cfg_default = """{ 'active': False }"""
class Options(object):
    """Mutable holder for CLI-wide settings.

    One instance is created per invocation and injected into each
    command through click's pass-decorator mechanism (cfg_items below).
    """

    def __init__(self):
        # Defaults; cli() overwrites these from the top-level options.
        self.verbose = False
        self.configName = None
        self.configPath = None
        self.configFile = None
# Decorator that injects a shared Options instance into commands,
# creating one if it does not exist yet (ensure=True).
cfg_items = click.make_pass_decorator(Options, ensure=True)


@click.group()
@click.option('--verbose', is_flag=True)
@click.option('--cfg_path', default=_cfg_path, type=click.Path(),
              help='Path where the configuration file can be found. Default value is %s' % _cfg_path)
@click.option('--cfg_name', default=_cfg_name,
              help='Configuration file name. Default value is %s' % _cfg_name)
@cfg_items
def cli(config, verbose, cfg_path, cfg_name):
    # Top-level command group: copy the global flags into the shared
    # Options object so subcommands can read them.
    config.verbose = verbose
    config.configPath = cfg_path
    config.configName = cfg_name
    config.configFile = os.path.join(cfg_path, cfg_name)
@cli.command()
@click.option('--create', is_flag=True, default=False,
              help='Create an empty configuration file if not found at the given path.')
@cfg_items
def check(config, create):
    """Checks that the configuration is functional.

    With --create, the configuration directory and an empty configuration
    file are created when missing. Exits with status 2 when the
    configuration file still cannot be found.
    """
    errors = []
    events = []
    if not os.path.exists(config.configPath) and create:
        # Fix: use makedirs (was mkdir) so intermediate directories are also
        # created when the app directory's parent does not exist yet.
        os.makedirs(config.configPath)
        events.append('The configuration directory %s was created.' % config.configPath)
    if not os.path.exists(config.configFile) and create:
        with open(config.configFile, 'w') as h:
            h.write(_cfg_default)
        events.append('An empty configuration file was created.')
    if not os.path.exists(config.configFile):
        errors.append('The configuration file %s could not be found at %s' % (config.configName, config.configPath))
        if not create:
            errors.append('    Add to the check command the "--create" option to create an empty configuration file.')
    if config.verbose and len(events) > 0:
        click.echo('We performed the following events during this check')
        for s in events:
            click.echo('    %s' % s)
    if len(errors) > 0:
        click.echo('The configuration check failed with the following errors', err=True)
        for s in errors:
            click.echo('    %s' % s, err=True)
        sys.exit(2)
| bear/doukan | doukan/main.py | Python | mit | 2,316 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django.conf.urls import url
from starthinker_ui.account import views
# URL routes for the account app: the Google OAuth callback and logout.
# NOTE(review): django.conf.urls.url() is deprecated since Django 3.1 and
# removed in 4.0; migrate these to django.urls.re_path when upgrading.
urlpatterns = [
    url(r'^oauth_callback/$',
        views.oauth_callback,
        name='account.oauth.callback'),
    url(r'^logout/$', views.logout, name='account.logout'),
]
| google/starthinker | starthinker_ui/account/urls.py | Python | apache-2.0 | 996 |
import requests,xbmcaddon,time,re
import resolveurl as urlresolver
from ..common import clean_title, clean_search,send_log,error_log
from ..scraper import Scraper
# 'true' when the add-on's developer-logging setting is enabled in Kodi.
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
# Desktop Chrome user-agent string sent with requests, presumably so the
# site treats the scraper as a regular browser.
User_Agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
class moviewatcher(Scraper):
    """Universal Scrapers source for moviewatcher.is (movies only; the
    episode scraper is currently disabled — see the commented block below).
    """
    domains = ['http://moviewatcher.is/']
    name = "moviewatcher"
    # NOTE(review): class-level mutable list shared across instances; this
    # appears intentional within the scraper framework — confirm.
    sources = []
    def __init__(self):
        self.base_link = 'http://moviewatcher.is'
        if dev_log=='true':
            # Remember the start time so get_source can report elapsed time.
            self.start_time = time.time()
    def scrape_movie(self, title, year, imdb, debrid = False):
        """Search the site for `title`, match by cleaned title and year,
        then collect stream sources from each matching result page."""
        try:
            search_id = clean_search(title.lower())
            start_url = '%s/search?query=%s' %(self.base_link,search_id.replace(' ','+'))
            #print start_url
            headers={'User-Agent':User_Agent}
            r = requests.get(start_url,headers=headers,timeout=5).content
            #print r
            # Result links and display names from the search results page.
            match = re.compile('<a class="movie-title" href="(.+?)">(.+?)</a>',re.DOTALL).findall(r)
            for url,name in match:
                if not clean_title(title).lower() == clean_title(name).lower():
                    continue
                if not year in url:
                    continue
                url = self.base_link + url
                #print 'Pass '+url
                self.get_source(url)
            return self.sources
        except Exception, argument:
            if dev_log == 'true':
                error_log(self.name,'Check Search')
            return self.sources
    # def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
        # try:
            # season_pull = "0%s"%season if len(season)<2 else season
            # episode_pull = "0%s"%episode if len(episode)<2 else episode
            # sep = 's%se%s' %(season_pull,episode_pull)
            # search_id = clean_search(title.lower())
            # start_url = '%s/search?query=%s' %(self.base_link,search_id.replace(' ','+'))
            # headers={'User-Agent':User_Agent}
            # r = requests.get(start_url,headers=headers,timeout=5)
            # page = r.url
            # if 'search?query' in page: #why this ?
                # match = re.compile('class="movie-title" href="(.+?)">(.+?)</a>',re.DOTALL).findall(r.content)
                # for url,name in match:
                    # if not clean_title(title).lower() == clean_title(name).lower():
                        # continue
                    # if '-tv-' in url:
                        # url = self.base_link + url +'/%s' %(sep)
                        # print 'pass_moviewatcherURL> '+url
                        # self.get_source(url)
            # else:
                # self.get_source(page,'unknown')
        # except Exception, argument:
            # if dev_log == 'true':
                # error_log(self.name,'Check Search')
            # return self.sources
    def get_source(self,url):
        """Extract /redirect/ links from a title page, resolve each to its
        final host URL and append resolvable ones to self.sources."""
        try:
            OPEN = requests.get(url).content
            Regex = re.compile(">Play:.+?window.open.+?'(/redirect/.+?)'",re.DOTALL).findall(OPEN)
            count = 0
            for link in Regex:
                #print link
                link = self.base_link + link
                headers={'User-Agent':User_Agent}
                # Don't follow the redirect; the Location header is the stream URL.
                r = requests.get(link,headers=headers,allow_redirects=False)
                stream_url = r.headers['location']
                if not urlresolver.HostedMediaFile(stream_url).valid_url():
                    continue
                # Derive a display host name, e.g. 'http://www.foo.com/x' -> 'Foo'.
                host = stream_url.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                count +=1
                self.sources.append({'source': host, 'quality': 'SD', 'scraper': self.name, 'url': stream_url,'direct': False})
            if dev_log=='true':
                end_time = time.time() - self.start_time
                send_log(self.name,end_time,count)
        # NOTE(review): bare except silently swallows every error here
        # (including KeyboardInterrupt); consider narrowing.
        except:
            pass
| repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/moviewatcher.py | Python | gpl-2.0 | 4,205 |
# Copyright (C) 2005, 2006, 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Test for setup.py build process"""
import os
import sys
import subprocess
import bzrlib
from bzrlib import tests
# TODO: Run bzr from the installed copy to see if it works. Really we need to
# run something that exercises every module, just starting it may not detect
# some missing modules.
#
# TODO: Check that the version numbers are in sync. (Or avoid this...)
class TestSetup(tests.TestCaseInTempDir):

    def test_build_and_install(self):
        """test cmd `python setup.py build`

        This tests that the build process and man generator run correctly.
        It also can catch new subdirectories that weren't added to setup.py.
        """
        # setup.py can only be run from the root of the source tree, but the
        # test suite may have been started from anywhere.
        self.source_dir = os.path.dirname(os.path.dirname(bzrlib.__file__))
        setup_py = os.path.join(self.source_dir, 'setup.py')
        if not os.path.isfile(setup_py):
            self.skip(
                'There is no setup.py file adjacent to the bzrlib directory')
        try:
            import distutils.sysconfig
            makefile_path = distutils.sysconfig.get_makefile_filename()
            if not os.path.exists(makefile_path):
                self.skip(
                    'You must have the python Makefile installed to run this'
                    ' test. Usually this can be found by installing'
                    ' "python-dev"')
        except ImportError:
            self.skip(
                'You must have distutils installed to run this test.'
                ' Usually this can be found by installing "python-dev"')
        self.log('test_build running from %s' % self.source_dir)
        build_dir = os.path.join(self.test_dir, "build")
        install_dir = os.path.join(self.test_dir, "install")
        # Build and install in one invocation, then verify the install tree
        # was created before cleaning up the build directory.
        self.run_setup(
            ['build', '-b', build_dir, 'install', '--root', install_dir])
        # Install layout is platform dependant
        self.assertPathExists(install_dir)
        self.run_setup(['clean', '-b', build_dir])

    def run_setup(self, args):
        """Run ./setup.py with *args* from the source root; fail on non-zero exit."""
        command = [sys.executable, './setup.py'] + args
        self.log('source base directory: %s', self.source_dir)
        self.log('args: %r', command)
        proc = subprocess.Popen(
            command,
            cwd=self.source_dir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = proc.communicate()
        self.log('stdout: %r', out)
        self.log('stderr: %r', err)
        self.assertEqual(0, proc.returncode,
                         'invocation of %r failed' % command)
| stewartsmith/bzr | bzrlib/tests/test_setup.py | Python | gpl-2.0 | 3,440 |
#!/usr/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import subprocess
subdir_list = os.listdir(".")
for subdir in subdir_list:
subdir = os.path.join(".", subdir)
print(subdir)
# continue
subprocess.Popen("cd %s; rm *_adm1.*; rm *_adm2.*; rm *_adm3.*; rm *_adm4.*; rm read_me.pdf;" % subdir, shell=True)
| howl-anderson/SDMdata | sdmdata/gadm/remove_useless_file.py | Python | agpl-3.0 | 333 |
"""
TrainExtensions for doing random spatial windowing and flipping of an
image dataset on every epoch.
"""
import warnings
import numpy
from . import TrainExtension
from pylearn2.datasets.preprocessing import CentralWindow
from pylearn2.utils.rng import make_np_rng
try:
from ..utils._window_flip import random_window_and_flip_c01b
from ..utils._window_flip import random_window_and_flip_b01c
except ImportError:
raise ImportError("Import of Cython module failed. Please make sure you "
"have run 'python setup.py develop' in the pylearn2 "
"directory")
__authors__ = "David Warde-Farley"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "wardefar@iro"
def _zero_pad(array, amount, axes=(1, 2)):
"""
.. todo::
WRITEME
"""
if amount == 0:
return array
new_shape = []
slices = []
for i, s in enumerate(array.shape):
if i in axes:
new_shape.append(s + 2 * amount)
slices.append(slice(amount, -amount))
else:
new_shape.append(s)
slices.append(slice(None))
new_shape = tuple(new_shape)
slices = tuple(slices)
new_array = numpy.zeros(new_shape, dtype=array.dtype)
new_array[slices] = array
return new_array
class WindowAndFlip(TrainExtension):
    """
    An extension that allows an image dataset to be flipped and
    windowed after each epoch of training.

    Parameters
    ----------
    window_shape : sequence of two ints
        Spatial shape of the window cropped from each image -- presumably
        (rows, columns); TODO confirm against the Cython window functions.
    randomize : list, optional
        If specified, a list of Datasets to randomly window and
        flip at each epoch.
    randomize_once : list, optional
        If specified, a list of Datasets to randomly window and
        flip once at the start of training.
    center : list, optional
        If specified, a list of Datasets to centrally window
        once at the start of training.
    rng : numpy.random.RandomState object or seed, optional
        A random number generator or seed used to create one.
        Seeded deterministically by default.
    pad_randomized : int, optional
        Amount of padding to add to each side of the images
        in `randomize` and `randomize_once`. Useful if you
        want to do zero-padded windowing with `window_shape`
        the actual size of the dataset, and validate/test on
        full-size images instead of central patches. Default
        is 0.
    flip : bool, optional
        Reflect images on the horizontal axis with probability
        0.5. `True` by default.
    """

    def __init__(self, window_shape, randomize=None, randomize_once=None,
                 center=None, rng=(2013, 02, 20), pad_randomized=0, flip=True):
        # NOTE(review): the default rng seed contains a Python 2 style
        # zero-prefixed literal (02); this module is Python 2 only as written.
        self._window_shape = tuple(window_shape)

        # Base (zero-padded) copies of the datasets to window; populated by
        # setup() so every epoch windows the pristine originals rather than a
        # window of a previous window.
        self._original = None

        self._randomize = randomize if randomize else []
        self._randomize_once = randomize_once if randomize_once else []
        self._center = center if center else []
        self._pad_randomized = pad_randomized
        self._flip = flip

        # Warn rather than fail: a no-op extension is legal but probably a
        # configuration mistake.
        if randomize is None and randomize_once is None and center is None:
            warnings.warn(self.__class__.__name__ + " instantiated without "
                          "any dataset arguments, and therefore does nothing",
                          stacklevel=2)

        self._rng = make_np_rng(rng, which_method="random_integers")

    def setup(self, model, dataset, algorithm):
        """
        Centrally window the `center` datasets once and apply the initial
        random window/flip to the `randomize` and `randomize_once` datasets.

        Notes
        -----
        `dataset` argument is ignored
        """
        dataset = None

        # Central windowing of auxiliary datasets (e.g. validation sets)
        preprocessor = CentralWindow(self._window_shape)
        for data in self._center:
            preprocessor.apply(data)

        # Do the initial random windowing
        randomize_now = self._randomize + self._randomize_once
        # Keep a zero-padded float32 copy of each dataset's topological view;
        # randomize_datasets() always draws windows from these originals.
        self._original = dict((data,
            _zero_pad(data.get_topological_view().astype('float32'),
                      self._pad_randomized)) for data in randomize_now)

        self.randomize_datasets(randomize_now)

    def randomize_datasets(self, datasets):
        """
        Applies random translations and flips to the selected datasets.

        Parameters
        ----------
        datasets : iterable of Datasets
            Datasets to window/flip in place.  Each must use either the
            ('c', 0, 1, 'b') or ('b', 0, 1, 'c') axis ordering.
        """
        for dataset in datasets:
            # Dispatch to the Cython implementation matching the dataset's
            # axis ordering.
            if tuple(dataset.view_converter.axes) == ('c', 0, 1, 'b'):
                wf_func = random_window_and_flip_c01b
            elif tuple(dataset.view_converter.axes) == ('b', 0, 1, 'c'):
                wf_func = random_window_and_flip_b01c
            else:
                raise ValueError("Axes of dataset is not supported: %s" %
                                 (str(dataset.view_converter.axes)))
            arr = wf_func(self._original[dataset],
                          self._window_shape,
                          rng=self._rng, flip=self._flip)
            dataset.set_topological_view(arr, axes=dataset.view_converter.axes)

    def on_monitor(self, model, dataset, algorithm):
        """
        Re-randomize the `randomize` datasets after each monitoring epoch.

        Notes
        -----
        All arguments are ignored.
        """
        model = None
        dataset = None
        algorithm = None

        self.randomize_datasets(self._randomize)
class WindowAndFlipC01B(WindowAndFlip):
    """
    A specialized version of WindowAndFlip accepting datasets with axes C01B.
    It exists due to backward compatibility.

    Parameters
    ----------
    window_shape : sequence of two ints
        Spatial shape of the window cropped from each image -- presumably
        (rows, columns); TODO confirm against the Cython window functions.
    randomize : list, optional
        If specified, a list of Datasets to randomly window and
        flip at each epoch.
    randomize_once : list, optional
        If specified, a list of Datasets to randomly window and
        flip once at the start of training.
    center : list, optional
        If specified, a list of Datasets to centrally window
        once at the start of training.
    rng : numpy.random.RandomState object or seed, optional
        A random number generator or seed used to create one.
        Seeded deterministically by default.
    pad_randomized : int, optional
        Amount of padding to add to each side of the images
        in `randomize` and `randomize_once`. Useful if you
        want to do zero-padded windowing with `window_shape`
        the actual size of the dataset, and validate/test on
        full-size images instead of central patches. Default
        is 0.
    flip : bool, optional
        Reflect images on the horizontal axis with probability
        0.5. `True` by default.
    """

    def __init__(self, window_shape, randomize=None, randomize_once=None,
                 center=None, rng=(2013, 02, 20), pad_randomized=0, flip=True):
        _randomize = randomize if randomize else []
        _randomize_once = randomize_once if randomize_once else []

        # Reject any dataset that is not in C01B axis order up front; the
        # parent class would otherwise also accept B01C datasets.
        for data in _randomize + _randomize_once:
            if tuple(data.view_converter.axes) != ('c', 0, 1, 'b'):
                raise ValueError("Expected axes: ('c', 0, 1, 'b') "
                                 "Actual axes: %s" %
                                 str(tuple(data.view_converter.axes)))

        warnings.warn("WindowAndFlipC01B is deprecated, use WindowAndFlip. " +
                      "WindowAndFlipC01B will be removed on or " +
                      "after August 25, 2014.", stacklevel=2)

        super(WindowAndFlipC01B, self).__init__(window_shape,
                                                randomize=randomize,
                                                randomize_once=randomize_once,
                                                center=center,
                                                rng=rng,
                                                pad_randomized=pad_randomized,
                                                flip=flip)
| skearnes/pylearn2 | pylearn2/train_extensions/window_flip.py | Python | bsd-3-clause | 7,944 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser.decorators import id2url
from weboob.tools.browser import BaseBrowser
import urllib
from .pages import SearchPage, AdvertPage
from .job import PopolemploiJobAdvert
__all__ = ['PopolemploiBrowser']
class PopolemploiBrowser(BaseBrowser):
    """Browser for the pole-emploi.fr job-search site."""

    PROTOCOL = 'http'
    DOMAIN = 'http://www.pole-emploi.fr/accueil/'
    ENCODING = None
    PAGES = {
        'http://candidat.pole-emploi.fr/candidat/rechercheoffres/resultats(.*?)': SearchPage,
        'http://candidat.pole-emploi.fr/candidat/rechercheoffres/detail/(?P<id>.+)': AdvertPage,
    }

    def search_job(self, pattern=None):
        """Run a simple keyword search and iterate over the job adverts found."""
        query = pattern.replace(' ', '+')
        self.location('http://offre.pole-emploi.fr/resultat?offresPartenaires=true&libMetier=%s'
                      % query)
        assert self.is_on_page(SearchPage)
        return self.page.iter_job_adverts()

    def advanced_search_job(self, metier='', place=None, contrat=None, salary=None,
                            qualification=None, limit_date=None, domain=None):
        """Run an advanced search built from the site's positional URL scheme.

        *place* is expected to be a '|'-separated string; only its second and
        third components are used in the URL.
        """
        place_parts = place.split('|')
        # The site encodes every criterion positionally inside a single URL
        # path segment; '%' in the quoted job name must be escaped as '$00'.
        criteria = (
            urllib.quote(metier).replace('%', '$00'),
            place_parts[1],
            place_parts[2],
            contrat,
            domain,
            salary,
            qualification,
            limit_date,
        )
        params = 'A_%s_%s_%s__%s_P_%s_%s_%s_______INDIFFERENT______________%s' % criteria
        self.location('http://candidat.pole-emploi.fr/candidat/rechercheoffres/resultats/%s'
                      % params)
        assert self.is_on_page(SearchPage)
        return self.page.iter_job_adverts()

    @id2url(PopolemploiJobAdvert.id2url)
    def get_job_advert(self, url, advert):
        """Open an advert detail page and return the filled advert object."""
        self.location(url)
        assert self.is_on_page(AdvertPage)
        return self.page.get_job_advert(url, advert)
| Boussadia/weboob | modules/popolemploi/browser.py | Python | agpl-3.0 | 3,057 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import mock
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as ac_svc
from st2common.services import workflows as wf_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)
PACKS = [
TEST_PACK_PATH,
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]
@mock.patch.object(
    publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_create",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_state",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_create",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create
    ),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_state",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state
    ),
)
class OrquestaContextTest(st2tests.ExecutionDbTestCase):
    """Tests that the st2/action context is propagated correctly through
    orquesta workflows and subworkflows.
    """

    @classmethod
    def setUpClass(cls):
        super(OrquestaContextTest, cls).setUpClass()

        # Register runners.
        runnersregistrar.register_runners()

        # Register test pack(s).
        actions_registrar = actionsregistrar.ActionsRegistrar(
            use_pack_cache=False, fail_on_failure=True
        )

        for pack in PACKS:
            actions_registrar.register_from_pack(pack)

    def _request_workflow(self, wf_file, context=None):
        """Request execution of the workflow fixture *wf_file* and assert that
        it transitions to the running state.

        Returns a tuple of (live action record, action execution record).
        """
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)

        # Only pass the context kwarg when one is given so the model keeps
        # its regular defaults otherwise.
        if context is None:
            lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        else:
            lv_ac_db = lv_db_models.LiveActionDB(
                action=wf_meta["name"], context=context
            )

        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(
            lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
        )

        return lv_ac_db, ac_ex_db

    def _complete_task(self, wf_ex_db, task_id):
        """Mark *task_id* of the given workflow execution as completed by
        feeding its action execution back to the workflow service.
        """
        query_filters = {"workflow_execution": str(wf_ex_db.id), "task_id": task_id}
        task_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        task_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(task_ex_db.id)
        )[0]
        wf_svc.handle_action_execution_completion(task_ac_ex_db)

    def _assert_subworkflow_msg(self, wf_name, expected_msg, context=None):
        """Run a subworkflow fixture to completion and verify the output msg.

        This factors out the boilerplate previously duplicated across the
        four action-context test methods: request the main workflow, drive
        the subworkflow under task1 through tasks 1-3, complete the main
        workflow, and compare the final result against *expected_msg*.
        """
        lv_ac_db, ac_ex_db = self._request_workflow(wf_name + ".yaml", context=context)

        # Identify the records for the main workflow.
        wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )[0]
        t1_ex_db = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(t1_ex_db.id)
        )[0]
        t1_wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(t1_ac_ex_db.id)
        )[0]
        self.assertEqual(t1_ex_db.status, wf_statuses.RUNNING)
        self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        self.assertEqual(t1_wf_ex_db.status, wf_statuses.RUNNING)

        # Complete the subworkflow under task1, one task at a time.
        for task_id in ("task1", "task2", "task3"):
            self._complete_task(t1_wf_ex_db, task_id)

        t1_wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(t1_wf_ex_db.id))
        t1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t1_ac_ex_db.id))
        self.assertEqual(t1_wf_ex_db.status, wf_statuses.SUCCEEDED)
        self.assertEqual(t1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

        # Complete task1 and the main workflow.
        wf_svc.handle_action_execution_completion(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(str(t1_ex_db.id))
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(wf_ex_db.id))
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

        # Check result.
        expected_result = {"output": {"msg": expected_msg}}
        self.assertDictEqual(lv_ac_db.result, expected_result)

    def test_runtime_context(self):
        """The st2 context must be visible at input, vars, publish and output."""
        lv_ac_db, ac_ex_db = self._request_workflow("runtime-context.yaml")

        # Identify the records for the workflow.
        wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )[0]
        t1_ex_db = wf_db_access.TaskExecution.query(
            workflow_execution=str(wf_ex_db.id)
        )[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(
            task_execution=str(t1_ex_db.id)
        )[0]

        # Complete the workflow.
        wf_svc.handle_action_execution_completion(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(str(t1_ex_db.id))
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(wf_ex_db.id))
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)

        # Check result. The workflow execution id only exists once the
        # workflow has been requested, so it appears in the publish/output
        # contexts but not in the input/vars contexts.
        expected_st2_ctx = {
            "action_execution_id": str(ac_ex_db.id),
            "api_url": "http://127.0.0.1/v1",
            "user": "stanley",
            "pack": "orquesta_tests",
            "action": "orquesta_tests.runtime-context",
            "runner": "orquesta",
        }

        expected_st2_ctx_with_wf_ex_id = copy.deepcopy(expected_st2_ctx)
        expected_st2_ctx_with_wf_ex_id["workflow_execution_id"] = str(wf_ex_db.id)

        expected_output = {
            "st2_ctx_at_input": expected_st2_ctx,
            "st2_ctx_at_vars": expected_st2_ctx,
            "st2_ctx_at_publish": expected_st2_ctx_with_wf_ex_id,
            "st2_ctx_at_output": expected_st2_ctx_with_wf_ex_id,
        }

        expected_result = {"output": expected_output}

        self.assertDictEqual(lv_ac_db.result, expected_result)

    def test_action_context_sys_user(self):
        # Without an explicit context the default user resolves to the
        # system user (stanley).
        self._assert_subworkflow_msg(
            "subworkflow-default-value-from-action-context",
            "stanley, All your base are belong to us!",
        )

    def test_action_context_api_user(self):
        # An api_user in the action context overrides the default user.
        self._assert_subworkflow_msg(
            "subworkflow-default-value-from-action-context",
            "Thanos, All your base are belong to us!",
            context={"api_user": "Thanos"},
        )

    def test_action_context_no_channel(self):
        # Without a source_channel in the context the fixture falls back to
        # the "no_channel" default.
        self._assert_subworkflow_msg(
            "subworkflow-source-channel-from-action-context",
            "no_channel, All your base are belong to us!",
        )

    def test_action_context_source_channel(self):
        # A source_channel in the action context is propagated into the
        # subworkflow's default value resolution.
        self._assert_subworkflow_msg(
            "subworkflow-source-channel-from-action-context",
            "general, All your base are belong to us!",
            context={"source_channel": "general"},
        )
| nzlosh/st2 | contrib/runners/orquesta_runner/tests/unit/test_context.py | Python | apache-2.0 | 19,115 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Sku(Model):
    """Describes the SKU of a Redis cache supplied to the create operation.

    :param name: The type of Redis cache to deploy. One of 'Basic',
     'Standard' or 'Premium'. See :class:`SkuName
     <azure.mgmt.redis.models.SkuName>`.
    :type name: str or :class:`SkuName <azure.mgmt.redis.models.SkuName>`
    :param family: The SKU family to use: 'C' (Basic/Standard) or 'P'
     (Premium). See :class:`SkuFamily <azure.mgmt.redis.models.SkuFamily>`.
    :type family: str or :class:`SkuFamily
     <azure.mgmt.redis.models.SkuFamily>`
    :param capacity: The size of the Redis cache to deploy. Valid values:
     0, 1, 2, 3, 4, 5 or 6 for the C family; 1, 2, 3 or 4 for the P family.
    :type capacity: int
    """

    # All three SKU properties are mandatory for the create Redis operation.
    _validation = {
        'name': {'required': True},
        'family': {'required': True},
        'capacity': {'required': True},
    }

    # Maps Python attribute names onto wire-format keys and types for the
    # msrest (de)serializer.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, name, family, capacity):
        self.capacity = capacity
        self.family = family
        self.name = name
| v-iam/azure-sdk-for-python | azure-mgmt-redis/azure/mgmt/redis/models/sku.py | Python | mit | 1,722 |
from scrapy.spiders import Spider
from projects.proxies.proxy.items import ProxyItem
from helper.spiderUtil import getFirstofCss,getAllofCss
class XiciProxySpider(Spider):
    """Spider that harvests proxy-server entries from xicidaili.com.

    Each listing page contains a table (#ip_list) with one proxy per row;
    one ProxyItem is yielded per row.
    """
    name = "xicidaili"
    allowed_domains = ["xicidaili.com"]
    start_urls = [
        "http://www.xicidaili.com/nn",
        "http://www.xicidaili.com/nn/2",
        "http://www.xicidaili.com/nn/3",
        "http://www.xicidaili.com/nn/4",
        "http://www.xicidaili.com/nn/5",
    ]

    def parse(self, response):
        """Parse one listing page and yield a ProxyItem per proxy row."""
        # The first <tr> of the #ip_list table is the header row; skip it by
        # slicing instead of tracking a boolean flag.
        for row in response.css("#ip_list tr")[1:]:
            item = ProxyItem()
            item['station'] = getFirstofCss(row, "td img::attr(alt)")
            item['ip'] = getFirstofCss(row, "td:nth-child(2)::text")
            item['port'] = getFirstofCss(row, "td:nth-child(3)::text")
            item['type'] = getFirstofCss(row, "td:nth-child(5)::text")
            item['httptype'] = getFirstofCss(row, "td:nth-child(6)::text").lower()
            yield item
| shengcanxu/CCrawler | source/projects/proxies/proxy/spiders/xicidailiproxy.py | Python | apache-2.0 | 1,077 |
import collections
import collections.abc

from .types import (
    is_primitive_type,
)
def normalize_object_for_json(obj):
    """Recursively convert *obj* into a JSON-serializable structure.

    :param obj: a primitive value, or an arbitrarily nested
        sequence/set/mapping of such values.
    :returns: an equivalent structure built only from primitives,
        ``list`` and ``dict`` (sets become lists).
    :raises TypeError: if a value of an unsupported type is encountered.
    """
    # The abstract base classes live in ``collections.abc``; the old
    # ``collections.Sequence``/``Set``/``Mapping`` aliases were removed in
    # Python 3.10, so import from the canonical location.  Function-scope
    # import keeps this change self-contained.
    from collections.abc import Mapping, Sequence, Set

    if is_primitive_type(obj):
        return obj
    elif isinstance(obj, (Sequence, Set)):
        # NOTE(review): str/bytes are also Sequences; they are assumed to
        # be caught by the primitive check above — confirm against
        # ``is_primitive_type``.
        return [normalize_object_for_json(value) for value in obj]
    elif isinstance(obj, Mapping):
        # Keys are normalized too; a non-hashable normalized key (e.g. a
        # list) would raise TypeError from the dict construction itself.
        return {
            normalize_object_for_json(key): normalize_object_for_json(value)
            for key, value in obj.items()
        }
    else:
        raise TypeError("Unable to normalize object of type: {0}".format(type(obj)))
| pipermerriam/populus | populus/utils/json.py | Python | mit | 634 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListLakeActions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_ListLakeActions_sync]
from google.cloud import dataplex_v1
def sample_list_lake_actions():
    """Demonstrate listing all actions of a Dataplex lake and printing
    each page element.

    NOTE(review): ``parent="parent_value"`` is a placeholder and must be
    replaced with a real lake resource name before this snippet runs —
    confirm the expected format against ListLakeActionsRequest docs.
    """
    # Create a client
    client = dataplex_v1.DataplexServiceClient()

    # Initialize request argument(s)
    request = dataplex_v1.ListLakeActionsRequest(
        parent="parent_value",
    )

    # Make the request
    page_result = client.list_lake_actions(request=request)

    # Handle the response
    for response in page_result:
        print(response)
| googleapis/python-dataplex | samples/generated_samples/dataplex_v1_generated_dataplex_service_list_lake_actions_sync.py | Python | apache-2.0 | 1,510 |
"""
Tests for cohorts
"""
# pylint: disable=no-member
import ddt
from django.contrib.auth.models import AnonymousUser, User
from django.db import IntegrityError
from django.http import Http404
from django.test import TestCase
from mock import call, patch
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from six import text_type
from six.moves import range
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
from .. import cohorts
from ..models import CourseCohort, CourseUserGroup, CourseUserGroupPartitionGroup, UnregisteredLearnerCohortAssignments
from ..tests.helpers import CohortFactory, CourseCohortFactory, config_course_cohorts, config_course_cohorts_legacy
@patch("openedx.core.djangoapps.course_groups.cohorts.tracker", autospec=True)
class TestCohortSignals(TestCase):
    """
    Test cases to validate event emissions for various cohort-related workflows
    """
    def setUp(self):
        super(TestCohortSignals, self).setUp()
        # A bare course key suffices: these tests exercise model signals
        # directly and never touch the modulestore.
        self.course_key = CourseLocator("dummy", "dummy", "dummy")

    def test_cohort_added(self, mock_tracker):
        """Creating a cohort emits exactly one creation event; edits and
        non-cohort groups emit nothing."""
        # Add cohort
        cohort = CourseUserGroup.objects.create(
            name="TestCohort",
            course_id=self.course_key,
            group_type=CourseUserGroup.COHORT
        )
        mock_tracker.emit.assert_called_with(
            "edx.cohort.created",
            {"cohort_id": cohort.id, "cohort_name": cohort.name}
        )
        mock_tracker.reset_mock()

        # Modify existing cohort: must not re-emit the creation event.
        cohort.name = "NewName"
        cohort.save()
        self.assertFalse(mock_tracker.called)

        # Add non-cohort group: other group types emit no cohort events.
        CourseUserGroup.objects.create(
            name="TestOtherGroupType",
            course_id=self.course_key,
            group_type="dummy"
        )
        self.assertFalse(mock_tracker.called)

    def test_cohort_membership_changed(self, mock_tracker):
        """Membership changes from either side of the m2m relation emit
        add/remove events; non-cohort groups stay silent."""
        cohort_list = [CohortFactory() for _ in range(2)]
        non_cohort = CourseUserGroup.objects.create(
            name="dummy",
            course_id=self.course_key,
            group_type="dummy"
        )
        user_list = [UserFactory() for _ in range(2)]
        mock_tracker.reset_mock()

        def assert_events(event_name_suffix, user_list, cohort_list):
            """
            Confirms the presence of the specified event for each user in the specified list of cohorts
            """
            expected_calls = [
                call(
                    "edx.cohort.user_" + event_name_suffix,
                    {
                        "user_id": user.id,
                        "cohort_id": cohort.id,
                        "cohort_name": cohort.name,
                    }
                )
                for user in user_list for cohort in cohort_list
            ]
            mock_tracker.emit.assert_has_calls(expected_calls, any_order=True)

        # Add users to cohort
        cohort_list[0].users.add(*user_list)
        assert_events("added", user_list, cohort_list[:1])
        mock_tracker.reset_mock()

        # Remove users from cohort
        cohort_list[0].users.remove(*user_list)
        assert_events("removed", user_list, cohort_list[:1])
        mock_tracker.reset_mock()

        # Clear users from cohort: clear() must report removals too.
        cohort_list[0].users.add(*user_list)
        cohort_list[0].users.clear()
        assert_events("removed", user_list, cohort_list[:1])
        mock_tracker.reset_mock()

        # Clear users from non-cohort group
        non_cohort.users.add(*user_list)
        non_cohort.users.clear()
        self.assertFalse(mock_tracker.emit.called)

        # Add cohorts to user (reverse side of the relation)
        user_list[0].course_groups.add(*cohort_list)
        assert_events("added", user_list[:1], cohort_list)
        mock_tracker.reset_mock()

        # Remove cohorts from user
        user_list[0].course_groups.remove(*cohort_list)
        assert_events("removed", user_list[:1], cohort_list)
        mock_tracker.reset_mock()

        # Clear cohorts from user
        user_list[0].course_groups.add(*cohort_list)
        user_list[0].course_groups.clear()
        assert_events("removed", user_list[:1], cohort_list)
        mock_tracker.reset_mock()

        # Clear non-cohort groups from user
        user_list[0].course_groups.add(non_cohort)
        user_list[0].course_groups.clear()
        self.assertFalse(mock_tracker.emit.called)
@ddt.ddt
class TestCohorts(ModuleStoreTestCase):
    """
    Test the cohorts feature
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    def setUp(self):
        """
        Make sure that course is reloaded every time--clear out the modulestore.
        """
        super(TestCohorts, self).setUp()
        self.toy_course_key = ToyCourseFactory.create().id

    def _create_cohort(self, course_id, cohort_name, assignment_type):
        """
        Create a cohort (with its CourseCohort assignment record) for testing.
        """
        cohort = CohortFactory(course_id=course_id, name=cohort_name)
        CourseCohortFactory(course_user_group=cohort, assignment_type=assignment_type)
        return cohort

    def test_is_course_cohorted(self):
        """
        Make sure cohorts.is_course_cohorted() correctly reports if a course is cohorted or not.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        config_course_cohorts(course, is_cohorted=True)
        self.assertTrue(cohorts.is_course_cohorted(course.id))

        # Make sure we get a Http404 if there's no course
        fake_key = CourseLocator('a', 'b', 'c')
        self.assertRaises(Http404, lambda: cohorts.is_course_cohorted(fake_key))

    def test_get_cohort_id(self):
        """
        Make sure that cohorts.get_cohort_id() correctly returns the cohort id, or raises Http404 when given an
        invalid course key.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        user = UserFactory(username="test", email="a@b.com")
        self.assertIsNone(cohorts.get_cohort_id(user, course.id))

        config_course_cohorts(course, is_cohorted=True)
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user])
        self.assertEqual(cohorts.get_cohort_id(user, course.id), cohort.id)

        self.assertRaises(
            Http404,
            lambda: cohorts.get_cohort_id(user, CourseLocator("course", "does_not", "exist"))
        )

    def test_assignment_type(self):
        """
        Make sure that cohorts.set_assignment_type() and cohorts.get_assignment_type() works correctly.
        """
        course = modulestore().get_course(self.toy_course_key)

        # We are creating two random cohorts because we can't change assignment type of
        # random cohort if it is the only random cohort present.
        cohort1 = self._create_cohort(course.id, "TestCohort1", CourseCohort.RANDOM)
        self._create_cohort(course.id, "TestCohort2", CourseCohort.RANDOM)
        cohort3 = self._create_cohort(course.id, "TestCohort3", CourseCohort.MANUAL)

        self.assertEqual(cohorts.get_assignment_type(cohort1), CourseCohort.RANDOM)

        cohorts.set_assignment_type(cohort1, CourseCohort.MANUAL)
        self.assertEqual(cohorts.get_assignment_type(cohort1), CourseCohort.MANUAL)

        cohorts.set_assignment_type(cohort3, CourseCohort.RANDOM)
        self.assertEqual(cohorts.get_assignment_type(cohort3), CourseCohort.RANDOM)

    def test_cannot_set_assignment_type(self):
        """
        Make sure that we can't change the assignment type of a random cohort if it is the only random cohort present.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = self._create_cohort(course.id, "TestCohort", CourseCohort.RANDOM)
        self.assertEqual(cohorts.get_assignment_type(cohort), CourseCohort.RANDOM)

        exception_msg = "There must be one cohort to which students can automatically be assigned."
        with self.assertRaises(ValueError) as context_manager:
            cohorts.set_assignment_type(cohort, CourseCohort.MANUAL)
        self.assertEqual(exception_msg, str(context_manager.exception))

    def test_get_cohort(self):
        """
        Make sure cohorts.get_cohort() does the right thing when the course is cohorted
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertEqual(course.id, self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        user = UserFactory(username="test", email="a@b.com")
        other_user = UserFactory(username="test2", email="a2@b.com")

        self.assertIsNone(cohorts.get_cohort(user, course.id), "No cohort created yet")

        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user])

        self.assertIsNone(
            cohorts.get_cohort(user, course.id),
            "Course isn't cohorted, so shouldn't have a cohort"
        )

        # Make the course cohorted...
        config_course_cohorts(course, is_cohorted=True)

        self.assertEqual(
            cohorts.get_cohort(user, course.id).id,
            cohort.id,
            "user should be assigned to the correct cohort"
        )
        self.assertEqual(
            cohorts.get_cohort(other_user, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "other_user should be assigned to the default cohort"
        )

    def test_get_cohort_preassigned_user(self):
        """
        When an email address is added to a cohort and a user signs up for the course with that email address,
        the user should automatically be added to that cohort and not a random cohort.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[])
        CohortFactory(course_id=course.id, name="RandomCohort", users=[])
        config_course_cohorts(course, is_cohorted=True)

        # Add email address to the cohort; (None, None, True) means
        # "no existing user, no previous cohort, preregistered".
        (user, previous_cohort, prereg) = cohorts.add_user_to_cohort(cohort, "email@example.com")
        self.assertEqual(
            (user, previous_cohort, prereg),
            (None, None, True)
        )

        # Create user with this email address
        user = UserFactory(username="test", email="email@example.com")
        self.assertEqual(
            cohorts.get_cohort(user, course.id).id,
            cohort.id,
            "User should be assigned to the right cohort"
        )

    def test_get_cohort_multiple_preassignments(self):
        """
        When an email address is added to multiple cohorts, the last cohort assignment should be respected.
        Then, when a user signs up for the course with that email address,
        the user should automatically be added to that cohort and not a random cohort.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[])
        cohort2 = CohortFactory(course_id=course.id, name="RandomCohort", users=[])
        config_course_cohorts(course, is_cohorted=True)

        # Add email address to the first cohort
        (user, previous_cohort, prereg) = cohorts.add_user_to_cohort(cohort, "email@example.com")
        self.assertEqual(
            (user, previous_cohort, prereg),
            (None, None, True)
        )

        # Add email address to the second cohort
        (user, previous_cohort, prereg) = cohorts.add_user_to_cohort(cohort2, "email@example.com")
        self.assertEqual(
            (user, previous_cohort, prereg),
            (None, None, True)
        )

        # Create user with this email address
        user = UserFactory(username="test", email="email@example.com")
        self.assertEqual(
            cohorts.get_cohort(user, course.id).id,
            cohort2.id,
            "User should be assigned to the right cohort"
        )

    @ddt.data(
        (True, 2),
        (False, 6),
    )
    @ddt.unpack
    def test_get_cohort_sql_queries(self, use_cached, num_sql_queries):
        """
        Test number of queries by cohorts.get_cohort() with and without caching.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(course, is_cohorted=True)
        user = UserFactory(username="test", email="a@b.com")
        CohortFactory.create(course_id=course.id, name="TestCohort", users=[user])

        # Three identical calls: cached mode should avoid repeat queries.
        with self.assertNumQueries(num_sql_queries):
            for __ in range(3):
                cohorts.get_cohort(user, course.id, use_cached=use_cached)

    def test_get_cohort_with_assign(self):
        """
        Make sure cohorts.get_cohort() returns None if no group is already
        assigned to a user instead of assigning/creating a group automatically
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        user = UserFactory(username="test", email="a@b.com")

        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )

        # get_cohort should return None as no group is assigned to user
        self.assertIsNone(cohorts.get_cohort(user, course.id, assign=False))

        # get_cohort should return a group for user
        self.assertEqual(cohorts.get_cohort(user, course.id).name, "AutoGroup")

    def test_cohorting_with_auto_cohorts(self):
        """
        Make sure cohorts.get_cohort() does the right thing.
        If there are auto cohort groups then a user should be assigned one.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")
        cohort = CohortFactory(course_id=course.id, name="TestCohort", users=[user1])

        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )

        self.assertEqual(cohorts.get_cohort(user1, course.id).id, cohort.id, "user1 should stay put")
        self.assertEqual(cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should be auto-cohorted")

    def test_anonymous_user_cohort(self):
        """
        Anonymous user is not assigned to any cohort group.
        """
        course = modulestore().get_course(self.toy_course_key)

        # verify cohorts is None when course is not cohorted
        self.assertIsNone(cohorts.get_cohort(AnonymousUser(), course.id))

        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )
        self.assertIsNone(cohorts.get_cohort(AnonymousUser(), course.id))

    def test_cohorting_with_migrations_done(self):
        """
        Verifies that cohort config changes on studio/modulestore side will
        not be reflected on lms after the migrations are done.
        """
        course = modulestore().get_course(self.toy_course_key)

        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")

        # Add an auto_cohort_group to the course...
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup"]
        )
        self.assertEqual(cohorts.get_cohort(user1, course.id).name, "AutoGroup", "user1 should be auto-cohorted")

        # Now set the auto_cohort_group to something different
        # This will have no effect on lms side as we are already done with migrations
        config_course_cohorts_legacy(
            course,
            cohorted=True,
            auto_cohort_groups=["OtherGroup"]
        )
        self.assertEqual(
            cohorts.get_cohort(user2, course.id).name, "AutoGroup", "user2 should be assigned to AutoGroups"
        )
        self.assertEqual(
            cohorts.get_cohort(user1, course.id).name, "AutoGroup", "user1 should still be in originally placed cohort"
        )

    def test_cohorting_with_no_auto_cohorts(self):
        """
        Make sure cohorts.get_cohort() does the right thing.
        If there are not auto cohorts then a user should be assigned to Default Cohort Group.
        Also verifies that cohort config changes on studio/modulestore side will
        not be reflected on lms after the migrations are done.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        user1 = UserFactory(username="test", email="a@b.com")
        user2 = UserFactory(username="test2", email="a2@b.com")

        # Make the auto_cohort_group list empty
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=[]
        )
        self.assertEqual(
            cohorts.get_cohort(user1, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "No groups->default cohort for user1"
        )

        # Add an auto_cohort_group to the course
        # This will have no effect on lms side as we are already done with migrations
        config_course_cohorts_legacy(
            course,
            cohorted=True,
            auto_cohort_groups=["AutoGroup"]
        )
        self.assertEqual(
            cohorts.get_cohort(user1, course.id).name,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).name,
            "user1 should still be in the default cohort"
        )
        self.assertEqual(
            cohorts.get_cohort(user2, course.id).id,
            cohorts.get_cohort_by_name(course.id, cohorts.DEFAULT_COHORT_NAME).id,
            "No groups->default cohort for user2"
        )

    def test_auto_cohorting_randomization(self):
        """
        Make sure cohorts.get_cohort() randomizes properly.
        """
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))

        groups = ["group_{0}".format(n) for n in range(5)]
        config_course_cohorts(
            course, is_cohorted=True, auto_cohorts=groups
        )

        # Assign 100 users to cohorts
        for i in range(100):
            user = UserFactory(
                username="test_{0}".format(i),
                email="a@b{0}.com".format(i)
            )
            cohorts.get_cohort(user, course.id)

        # Now make sure that the assignment was at least vaguely random:
        # each cohort should have at least 1, and fewer than 50 students.
        # (with 5 groups, probability of 0 users in any group is about
        # .8**100= 2.0e-10)
        for cohort_name in groups:
            cohort = cohorts.get_cohort_by_name(course.id, cohort_name)
            num_users = cohort.users.count()
            self.assertGreater(num_users, 1)
            self.assertLess(num_users, 50)

    def test_get_course_cohorts_noop(self):
        """
        Tests get_course_cohorts returns an empty list when no cohorts exist.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(course, is_cohorted=True)
        self.assertEqual([], cohorts.get_course_cohorts(course))

    def test_get_course_cohorts(self):
        """
        Tests that get_course_cohorts returns all cohorts, including auto cohorts.
        """
        course = modulestore().get_course(self.toy_course_key)
        config_course_cohorts(
            course,
            is_cohorted=True,
            auto_cohorts=["AutoGroup1", "AutoGroup2"]
        )

        # add manual cohorts to course 1
        CohortFactory(course_id=course.id, name="ManualCohort")
        CohortFactory(course_id=course.id, name="ManualCohort2")

        cohort_set = {c.name for c in cohorts.get_course_cohorts(course)}
        self.assertEqual(cohort_set, {"AutoGroup1", "AutoGroup2", "ManualCohort", "ManualCohort2"})

    def test_get_cohort_names(self):
        """
        Tests that get_cohort_names maps cohort ids to cohort names.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort1 = CohortFactory(course_id=course.id, name="Cohort1")
        cohort2 = CohortFactory(course_id=course.id, name="Cohort2")
        self.assertEqual(
            cohorts.get_cohort_names(course),
            {cohort1.id: cohort1.name, cohort2.id: cohort2.name}
        )

    def test_get_cohort_by_name(self):
        """
        Make sure cohorts.get_cohort_by_name() properly finds a cohort by name for a given course. Also verify that it
        raises an error when the cohort is not found.
        """
        course = modulestore().get_course(self.toy_course_key)

        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_name(course.id, "CohortDoesNotExist")
        )

        cohort = CohortFactory(course_id=course.id, name="MyCohort")
        self.assertEqual(cohorts.get_cohort_by_name(course.id, "MyCohort"), cohort)

        # NOTE(review): the cohort object (not its name) is passed here; the
        # lookup still fails because the course key does not exist — confirm
        # this was intentional.
        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_name(CourseLocator("course", "does_not", "exist"), cohort)
        )

    def test_get_cohort_by_id(self):
        """
        Make sure cohorts.get_cohort_by_id() properly finds a cohort by id for a given
        course.
        """
        course = modulestore().get_course(self.toy_course_key)
        cohort = CohortFactory(course_id=course.id, name="MyCohort")

        self.assertEqual(cohorts.get_cohort_by_id(course.id, cohort.id), cohort)

        cohort.delete()

        self.assertRaises(
            CourseUserGroup.DoesNotExist,
            lambda: cohorts.get_cohort_by_id(course.id, cohort.id)
        )

    @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
    def test_add_cohort(self, mock_tracker):
        """
        Make sure cohorts.add_cohort() properly adds a cohort to a course and handles
        errors.
        """
        assignment_type = CourseCohort.RANDOM
        course = modulestore().get_course(self.toy_course_key)
        added_cohort = cohorts.add_cohort(course.id, "My Cohort", assignment_type)
        mock_tracker.emit.assert_any_call(
            "edx.cohort.creation_requested",
            {"cohort_name": added_cohort.name, "cohort_id": added_cohort.id}
        )

        self.assertEqual(added_cohort.name, "My Cohort")
        # Adding the same cohort name twice is an error.
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_cohort(course.id, "My Cohort", assignment_type)
        )
        does_not_exist_course_key = CourseLocator("course", "does_not", "exist")
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_cohort(does_not_exist_course_key, "My Cohort", assignment_type)
        )

    @patch("openedx.core.djangoapps.course_groups.cohorts.tracker")
    @patch("openedx.core.djangoapps.course_groups.cohorts.COHORT_MEMBERSHIP_UPDATED")
    def test_add_user_to_cohort(self, mock_signal, mock_tracker):
        """
        Make sure cohorts.add_user_to_cohort() properly adds a user to a cohort and
        handles errors.
        """
        course_user = UserFactory(username="Username", email="a@b.com")
        UserFactory(username="RandomUsername", email="b@b.com")
        course = modulestore().get_course(self.toy_course_key)
        CourseEnrollment.enroll(course_user, self.toy_course_key)
        first_cohort = CohortFactory(course_id=course.id, name="FirstCohort")
        second_cohort = CohortFactory(course_id=course.id, name="SecondCohort")

        def check_and_reset_signal():
            """Every successful membership change must fire the signal once."""
            mock_signal.send.assert_called_with(sender=None, user=course_user, course_key=self.toy_course_key)
            mock_signal.reset_mock()

        # Success cases
        # We shouldn't get back a previous cohort, since the user wasn't in one
        self.assertEqual(
            cohorts.add_user_to_cohort(first_cohort, "Username"),
            (course_user, None, False)
        )
        mock_tracker.emit.assert_any_call(
            "edx.cohort.user_add_requested",
            {
                "user_id": course_user.id,
                "cohort_id": first_cohort.id,
                "cohort_name": first_cohort.name,
                "previous_cohort_id": None,
                "previous_cohort_name": None,
            }
        )
        check_and_reset_signal()

        # Should get (user, previous_cohort_name) when moved from one cohort to
        # another
        self.assertEqual(
            cohorts.add_user_to_cohort(second_cohort, "Username"),
            (course_user, "FirstCohort", False)
        )
        mock_tracker.emit.assert_any_call(
            "edx.cohort.user_add_requested",
            {
                "user_id": course_user.id,
                "cohort_id": second_cohort.id,
                "cohort_name": second_cohort.name,
                "previous_cohort_id": first_cohort.id,
                "previous_cohort_name": first_cohort.name,
            }
        )
        check_and_reset_signal()

        # Should preregister email address for a cohort if an email address
        # not associated with a user is added
        (user, previous_cohort, prereg) = cohorts.add_user_to_cohort(first_cohort, "new_email@example.com")
        self.assertEqual(
            (user, previous_cohort, prereg),
            (None, None, True)
        )
        mock_tracker.emit.assert_any_call(
            "edx.cohort.email_address_preassigned",
            {
                "user_email": "new_email@example.com",
                "cohort_id": first_cohort.id,
                "cohort_name": first_cohort.name,
            }
        )

        # Error cases
        # Should get ValueError if user already in cohort
        self.assertRaises(
            ValueError,
            lambda: cohorts.add_user_to_cohort(second_cohort, "Username")
        )
        # UserDoesNotExist if user truly does not exist
        self.assertRaises(
            User.DoesNotExist,
            lambda: cohorts.add_user_to_cohort(first_cohort, "non_existent_username")
        )

    def test_set_cohorted_with_invalid_data_type(self):
        """
        Test that cohorts.set_course_cohorted raises exception if argument is not a boolean.
        """
        course = modulestore().get_course(self.toy_course_key)

        with self.assertRaises(ValueError) as value_error:
            cohorts.set_course_cohorted(course.id, 'not a boolean')

        self.assertEqual("Cohorted must be a boolean", text_type(value_error.exception))
@ddt.ddt
class TestCohortsAndPartitionGroups(ModuleStoreTestCase):
    """
    Test Cohorts and Partitions Groups.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    def setUp(self):
        """
        Regenerate a test course and cohorts for each test
        """
        super(TestCohortsAndPartitionGroups, self).setUp()

        self.test_course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.test_course_key)

        self.first_cohort = CohortFactory(course_id=self.course.id, name="FirstCohort")
        self.second_cohort = CohortFactory(course_id=self.course.id, name="SecondCohort")

        # Arbitrary partition/group ids used by every test below.
        self.partition_id = 1
        self.group1_id = 10
        self.group2_id = 20

    def _link_cohort_partition_group(self, cohort, partition_id, group_id):
        """
        Utility to create cohort -> partition group assignments in the database.
        """
        link = CourseUserGroupPartitionGroup(
            course_user_group=cohort,
            partition_id=partition_id,
            group_id=group_id,
        )
        link.save()
        return link

    def test_get_group_info_for_cohort(self):
        """
        Basic test of the partition_group_info accessor function
        """
        # api should return nothing for an unmapped cohort
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )
        # create a link for the cohort in the db
        link = self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        # api should return the specified partition and group
        # (note the return order: (group_id, partition_id))
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id)
        )
        # delete the link in the db
        link.delete()
        # api should return nothing again
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )

    @ddt.data(
        (True, 1),
        (False, 3),
    )
    @ddt.unpack
    def test_get_group_info_for_cohort_queries(self, use_cached, num_sql_queries):
        """
        Test the SQL query count of the accessor with and without caching.
        """
        # create a link for the cohort in the db
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        with self.assertNumQueries(num_sql_queries):
            for __ in range(3):
                self.assertIsNotNone(cohorts.get_group_info_for_cohort(self.first_cohort, use_cached=use_cached))

    def test_multiple_cohorts(self):
        """
        Test that multiple cohorts can be linked to the same partition group
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id,
        )
        self._link_cohort_partition_group(
            self.second_cohort,
            self.partition_id,
            self.group1_id,
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id),
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.second_cohort),
            cohorts.get_group_info_for_cohort(self.first_cohort),
        )

    def test_multiple_partition_groups(self):
        """
        Test that a cohort cannot be mapped to more than one partition group
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id,
        )
        # A second mapping for the same cohort violates the DB constraint.
        with self.assertRaises(IntegrityError):
            self._link_cohort_partition_group(
                self.first_cohort,
                self.partition_id,
                self.group2_id,
            )

    def test_delete_cascade(self):
        """
        Test that cohort -> partition group links are automatically deleted
        when their parent cohort is deleted.
        """
        self._link_cohort_partition_group(
            self.first_cohort,
            self.partition_id,
            self.group1_id
        )
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (self.group1_id, self.partition_id)
        )
        # delete the link
        self.first_cohort.delete()
        # api should return nothing at that point
        self.assertEqual(
            cohorts.get_group_info_for_cohort(self.first_cohort),
            (None, None),
        )
        # link should no longer exist because of delete cascade
        with self.assertRaises(CourseUserGroupPartitionGroup.DoesNotExist):
            CourseUserGroupPartitionGroup.objects.get(
                course_user_group_id=self.first_cohort.id
            )
class TestUnregisteredLearnerCohortAssignments(TestCase):
    """
    Tests the UnregisteredLearnerCohortAssignment.retire_user method.
    """
    def setUp(self):
        super(TestUnregisteredLearnerCohortAssignments, self).setUp()
        self.course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_Course')
        self.cohort = CourseUserGroup.objects.create(
            name="TestCohort",
            course_id=self.course_key,
            group_type=CourseUserGroup.COHORT
        )
        self.cohort_assignment = UnregisteredLearnerCohortAssignments.objects.create(
            course_user_group=self.cohort,
            course_id=self.course_key,
            email='learner@example.com'
        )

    def test_retired_user_has_deleted_record(self):
        """Retiring a known email deletes its pre-assignment record."""
        deleted = UnregisteredLearnerCohortAssignments.delete_by_user_value(
            value='learner@example.com', field='email'
        )
        self.assertTrue(deleted)

        remaining = UnregisteredLearnerCohortAssignments.objects.filter(
            email=self.cohort_assignment.email
        )
        self.assertFalse(remaining)

    def test_retired_user_with_no_cohort_returns_false(self):
        """Retiring an unknown email reports False and leaves the
        existing record untouched."""
        known_learner_email = self.cohort_assignment.email

        deleted = UnregisteredLearnerCohortAssignments.delete_by_user_value(
            value='nonexistantlearner@example.com', field='email'
        )

        self.assertFalse(deleted)
        self.assertEqual(self.cohort_assignment.email, known_learner_email)
| cpennington/edx-platform | openedx/core/djangoapps/course_groups/tests/test_cohorts.py | Python | agpl-3.0 | 33,726 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class StyleSheetList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, assistant_sid):
"""
Initialize the StyleSheetList
:param Version version: Version that contains the resource
:param assistant_sid: The SID of the Assistant that is the parent of the resource
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetList
"""
super(StyleSheetList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, }
def get(self):
"""
Constructs a StyleSheetContext
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
"""
return StyleSheetContext(self._version, assistant_sid=self._solution['assistant_sid'], )
def __call__(self):
"""
Constructs a StyleSheetContext
:returns: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
:rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
"""
return StyleSheetContext(self._version, assistant_sid=self._solution['assistant_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Autopilot.V1.StyleSheetList>'
class StyleSheetPage(Page):
    """A single page of StyleSheet records.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, response, solution):
        """Initialize the StyleSheetPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param dict solution: path solution carrying the parent assistant_sid
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetPage
        """
        super(StyleSheetPage, self).__init__(version, response)
        # Path solution: {'assistant_sid': ...}
        self._solution = solution

    def get_instance(self, payload):
        """Build a StyleSheetInstance from a raw API payload.

        :param dict payload: Payload response from the API
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        return StyleSheetInstance(
            self._version,
            payload,
            assistant_sid=self._solution['assistant_sid'],
        )

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Autopilot.V1.StyleSheetPage>'
class StyleSheetContext(InstanceContext):
    """Context for fetching and updating a single assistant's StyleSheet.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, assistant_sid):
        """Initialize the StyleSheetContext.

        :param Version version: Version that contains the resource
        :param assistant_sid: The SID of the Assistant with the StyleSheet resource to fetch
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
        """
        super(StyleSheetContext, self).__init__(version)
        # Path solution and resource URI derived from it
        self._solution = {'assistant_sid': assistant_sid, }
        self._uri = '/Assistants/{assistant_sid}/StyleSheet'.format(**self._solution)

    def fetch(self):
        """Fetch the StyleSheetInstance.

        :returns: The fetched StyleSheetInstance
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )
        return StyleSheetInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )

    def update(self, style_sheet=values.unset):
        """Update the StyleSheetInstance.

        :param dict style_sheet: The JSON string that describes the style sheet object
        :returns: The updated StyleSheetInstance
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        data = values.of({'StyleSheet': serialize.object(style_sheet), })
        payload = self._version.update(method='POST', uri=self._uri, data=data, )
        return StyleSheetInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Autopilot.V1.StyleSheetContext {}>'.format(details)
class StyleSheetInstance(InstanceResource):
    """Resource wrapper for an assistant's StyleSheet payload.

    PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com.
    """

    def __init__(self, version, payload, assistant_sid):
        """Initialize the StyleSheetInstance.

        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        super(StyleSheetInstance, self).__init__(version)
        # Marshal the raw payload into a flat property dict.
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'assistant_sid': payload.get('assistant_sid'),
            'url': payload.get('url'),
            'data': payload.get('data'),
        }
        # Context is built lazily in _proxy.
        self._context = None
        self._solution = {'assistant_sid': assistant_sid, }

    @property
    def _proxy(self):
        """Lazily build (and cache) the StyleSheetContext all actions are proxied to.

        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetContext
        """
        if self._context is None:
            self._context = StyleSheetContext(
                self._version,
                assistant_sid=self._solution['assistant_sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """:returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def assistant_sid(self):
        """:returns: The SID of the Assistant that is the parent of the resource
        :rtype: unicode
        """
        return self._properties['assistant_sid']

    @property
    def url(self):
        """:returns: The absolute URL of the StyleSheet resource
        :rtype: unicode
        """
        return self._properties['url']

    @property
    def data(self):
        """:returns: The JSON string that describes the style sheet object
        :rtype: dict
        """
        return self._properties['data']

    def fetch(self):
        """Fetch the StyleSheetInstance.

        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        return self._proxy.fetch()

    def update(self, style_sheet=values.unset):
        """Update the StyleSheetInstance.

        :param dict style_sheet: The JSON string that describes the style sheet object
        :rtype: twilio.rest.autopilot.v1.assistant.style_sheet.StyleSheetInstance
        """
        return self._proxy.update(style_sheet=style_sheet, )

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Autopilot.V1.StyleSheetInstance {}>'.format(details)
| twilio/twilio-python | twilio/rest/autopilot/v1/assistant/style_sheet.py | Python | mit | 9,152 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration for the ``mapeo`` app: adds a
    # non-editable ``slug`` field to ``Organizaciones`` and widens the
    # ``tipo`` choices. Do not hand-edit the field definitions.

    dependencies = [
        ('mapeo', '0001_initial'),
    ]

    operations = [
        # default=1 only backfills existing rows; preserve_default=False
        # drops the default from the final field definition.
        migrations.AddField(
            model_name='organizaciones',
            name='slug',
            field=models.SlugField(default=1, max_length=450, editable=False),
            preserve_default=False,
        ),
        # Choice labels are UTF-8 encoded bytestrings (Python 2 source).
        migrations.AlterField(
            model_name='organizaciones',
            name='tipo',
            field=models.IntegerField(choices=[(1, b'Organizaci\xc3\xb3n que apoya y participa con la Campa\xc3\xb1a'), (2, b'Comit\xc3\xa9 comunal'), (3, b'Diplomado de promotor\xc3\xada'), (4, b'Diplomado de comunicaci\xc3\xb3n')]),
        ),
    ]
| CARocha/plataforma_fadcanic | mapeo/migrations/0002_auto_20151117_1648.py | Python | mit | 800 |
from flask import Flask, redirect
# Fix: the ``flask.ext.*`` import namespace was deprecated and then removed
# in Flask 1.0 -- extensions are imported under their own package name.
from flask_basicauth import BasicAuth
from creds import username, password
from flask_admin import Admin
from flask_admin.contrib.fileadmin import FileAdmin
import os.path as op

# Flask app with a Flask-Admin file manager over ./static, protected by
# HTTP basic auth (credentials come from the local creds module).
app = Flask(__name__)
admin = Admin(app, name='Filio', template_mode='bootstrap3')
path = op.join(op.dirname(__file__), 'static')
admin.add_view(FileAdmin(path, '/static/', name='Static Files'))

# BASIC_AUTH_FORCE protects every route, not just the decorated ones.
app.config['BASIC_AUTH_USERNAME'] = username
app.config['BASIC_AUTH_PASSWORD'] = password
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)


@app.route('/')
@basic_auth.required
def secret_view():
    """Landing page: redirect straight to the admin file manager."""
    return redirect('/admin')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=9500, debug=True)
#!/usr/bin/python
# boto3 python client to download files from S3 and check md5
# AWS_ACCESS_KEY_ID .. The access key for your AWS account.
# AWS_SECRET_ACCESS_KEY .. The secret key for your AWS account.
# folker@anl.gov
import sys, getopt, boto3, hashlib, io
import argparse
def md5sum(src, length=io.DEFAULT_BUFFER_SIZE):
    """Return the hex MD5 digest of the file at *src*, read in *length*-byte chunks."""
    digest = hashlib.md5()
    with io.open(src, mode="rb") as handle:
        while True:
            chunk = handle.read(length)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def main():
    """Parse CLI arguments, download one S3 object and print its MD5 digest.

    Exits 2 when no target filename is given, 0 on success. Credentials
    come either from the command line or from the usual boto3 environment
    variables / config files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--keyid", default=None, help=" aws_access_key_id")
    parser.add_argument("-b", "--bucket", default=None, help="AWS bucket")
    parser.add_argument("-t", "--tmpfile", default=None, help="filename to create")
    parser.add_argument("-o", "--objectname", default=None, help="object to download")
    parser.add_argument("-k", "--accesskey", default=None, help="aws_secret_access_key")
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
    parser.add_argument("-r", "--region", default=None, help="AWS region")
    parser.add_argument("-s", "--s3endpoint", default="https://s3.it.anl.gov:18082")
    args = parser.parse_args()

    if args.verbose:
        print('keyId is =', args.keyid)
        print('accessKey is =', args.accesskey)
        print('bucket is =', args.bucket)
        print('tmpfile is =', args.tmpfile)
        print('region is=', args.region)
        print('object is =', args.objectname)

    if args.tmpfile is None:
        print('we need a filename')
        sys.exit(2)

    if args.accesskey is None:
        # No explicit credentials: boto3 falls back to ENV vars / config files.
        if args.verbose:
            print('using existing credentials from ENV vars or files')
        s3 = boto3.client('s3',
                          endpoint_url=args.s3endpoint,
                          region_name=args.region
                          )
    else:
        if args.verbose:
            print('using credentials from cmd-line')
        s3 = boto3.client('s3',
                          endpoint_url=args.s3endpoint,
                          region_name=args.region,
                          aws_access_key_id=args.keyid,
                          aws_secret_access_key=args.accesskey
                          )

    with open(args.tmpfile, 'wb') as f:
        s3.download_fileobj(args.bucket, args.objectname, f)

    print(md5sum(args.tmpfile))
    sys.exit(0)


# Guard the entry point so importing this module does not trigger a download.
if __name__ == '__main__':
    main()
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.mininode import *
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
direct_fetch_response_time = 0.05
class TestNode(NodeConnCB):
    """P2P connection that records how the node announces new blocks.

    Tracks the most recent inv/headers announcement so tests can assert on
    which announcement mechanism the node used.
    """
    def __init__(self):
        super().__init__()
        self.block_announced = False
        self.last_blockhash_announced = None

    def clear_last_announcement(self):
        """Forget any previously recorded inv/headers announcement."""
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)

    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))
        self.connection.send_message(msg)

    def get_headers(self, locator, hashstop):
        """Send a getheaders message with the given locator and hashstop."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_block_inv(self, blockhash):
        """Announce a single block hash to the node via inv."""
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)

    def on_inv(self, conn, message):
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash

    def on_headers(self, conn, message):
        if len(message.headers):
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256

    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        # PEP 8 fix: compare against None with `is not None`, not `!=`.
        expect_headers = headers if headers is not None else []
        expect_inv = inv if inv is not None else []
        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)
        with mininode_lock:
            self.block_announced = False
            success = True
            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            if compare_inv != expect_inv:
                success = False
            hash_headers = []
            if "headers" in self.last_message:
                # treat headers as a list of block hashes
                hash_headers = [x.sha256 for x in self.last_message["headers"].headers]
            if hash_headers != expect_headers:
                success = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
        return success

    def wait_for_getdata(self, hash_list, timeout=60):
        """Block until a getdata for exactly *hash_list* arrives (no-op when empty)."""
        if hash_list == []:
            return
        test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block_announcement(self, block_hash, timeout=60):
        """Block until *block_hash* is announced via inv or headers."""
        test_function = lambda: self.last_blockhash_announced == block_hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def send_header_for_blocks(self, new_blocks):
        """Announce *new_blocks* via a headers message."""
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)
class SendHeadersTest(SarielsazTestFramework):
    """Exercise block-announcement behavior (inv vs headers); see module docstring.

    The five test parts depend on strict message ordering, so the bodies
    below are kept byte-for-byte identical to the original.
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    # mine count blocks and return the new tip
    def mine_blocks(self, count):
        """Mine *count* blocks on node0; return the new tip hash as an int."""
        # Clear out last block announcement from each p2p listener
        [ x.clear_last_announcement() for x in self.p2p_connections ]
        self.nodes[0].generate(count)
        return int(self.nodes[0].getbestblockhash(), 16)

    # mine a reorg that invalidates length blocks (replacing them with
    # length+1 blocks).
    # Note: we clear the state of our p2p connections after the
    # to-be-reorged-out blocks are mined, so that we don't break later tests.
    # return the list of block hashes newly mined
    def mine_reorg(self, length):
        self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
        sync_blocks(self.nodes, wait=0.1)
        for x in self.p2p_connections:
            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
            x.clear_last_announcement()
        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
        sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = TestNode()
        test_node = TestNode()
        self.p2p_connections = [inv_node, test_node]
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
        # Set nServices to 0 for test_node, so no block download will occur outside of
        # direct fetching
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
        inv_node.add_connection(connections[0])
        test_node.add_connection(connections[1])
        NetworkThread().start() # Start up network handling in another thread
        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()
        tip = int(self.nodes[0].getbestblockhash(), 16)
        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info("Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.get_headers(locator=[old_tip], hashstop=tip)
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement() # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height+1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping() # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()
        self.log.info("Part 1: success!")
        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()
        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)
        height = self.nodes[0].getblockcount()+1
        block_time += 10 # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for b in range(i+1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [ inv_node.send_block_inv(x.sha256) for x in blocks ]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [ test_node.send_message(msg_block(x)) for x in blocks ]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(headers=[tip]), True)
                height += 1
                block_time += 1
        self.log.info("Part 2: success!")
        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
            block_time += 8
            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            block_time += 9
            fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)
            # Use getblocks/getdata
            test_node.send_getblocks(locator = [fork_point])
            assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
            test_node.get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])
            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(inv=[tip]), True)
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]), True)
        self.log.info("Part 3: success!")
        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1
        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for b in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))
        inv_node.sync_with_ping() # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message
        # This time, direct fetch should work
        blocks = []
        for b in range(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
        [ test_node.send_message(msg_block(x)) for x in blocks ]
        test_node.sync_with_ping()
        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []
        # Create extra blocks for later
        for b in range(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message
        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message
        self.log.info("Part 4: success!")
        # Now deliver all those blocks we announced.
        [ test_node.send_message(msg_block(x)) for x in blocks ]
        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [ test_node.send_message(msg_block(x)) for x in blocks ]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS+1):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()
        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])
        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]
        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i%len(blocks)]])
            test_node.wait_for_getheaders()
        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])
        # Should get disconnected
        test_node.wait_for_disconnect()
        self.log.info("Part 5: success!")
        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
| sarielsaz/sarielsaz | test/functional/sendheaders.py | Python | mit | 24,054 |
# Copyright (C) 2012-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
from MEDLoader import *
from MEDCouplingRemapper import *
# Load source and target meshes (mesh level 0) from their MED files.
ms=MEDFileMesh.New("MeshSource.med") ; ms=ms.getMeshAtLevel(0)
mt=MEDFileMesh.New("MeshTarget.med") ; mt=mt.getMeshAtLevel(0)
# Build an analytic cell field named "Temperature" on the source mesh.
fs=ms.fillFromAnalytic(ON_CELLS,1,"319.*cos(((x)*(x)*3+(y-0.52)*(y-0.52)+(z-0.1)*(z-0.1))*7)")
fs.setNature(ConservativeVolumic)
fs.setName("Temperature")
MEDCouplingFieldDouble.WriteVTK("MeshSource.vtu",[fs])
# Write a translated deep copy of the target mesh (visualization only;
# the projection itself uses the untranslated mt).
mt2=mt.deepCpy()
mt2.translate([0.4,0.,0.])
mt2.writeVTK("MeshTarget.vtu")
#
# Project the source cell field onto the target mesh (P0->P0 intersection).
remap=MEDCouplingRemapper()
remap.prepare(ms,mt,"P0P0")
# 1e100 = default value assigned to target cells not covered by the source.
ft=remap.transferField(fs,1e100)
ft.setName("Temperature")
# Shift the result so it lines up with the translated MeshTarget.vtu above.
ft.getMesh().translate([0.4,0.,0.])
MEDCouplingFieldDouble.WriteVTK("FieldTarget.vtu",[ft])
# Image illustrating the starting point:
# - load MeshSource.vtu (contains mesh+field)
# - load MeshTarget.vtu (contains only the mesh with translation)
#
# Image illustrating the result point:
# - load MeshSource.vtu
# - load FieldTarget.vtu
| FedoraScientific/salome-med | src/MEDOP/tut/projection/demovtu/projection.py | Python | lgpl-2.1 | 1,782 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Repeat our danger."""
import task_03

# Danger message repeated five times. The original assigned the plain value
# first and immediately overwrote it -- the dead assignment is removed and
# PEP 8 operator spacing applied.
KLAXON = task_03.KLAXON * 5
| MJVarghese/is210-week-03-warmup | task_04.py | Python | mpl-2.0 | 141 |
#!/usr/bin/env python
import sys
import urllib
import subprocess
import os
import yaml
import markdown
import lxml.html
import re
import html2text
# Site-generation configuration.
package_location = "../../IT-systems"  # local path holding the generated pages
menu_file = "../../_data/menu.yml"  # Jekyll menu data file this script rewrites
menu_base = "/IT-systems/"  # URL prefix for generated menu entries
menu_base_name = "IT-systems"  # top-level menu entry that receives generated items
# Hidden markers delimiting the auto-generated region inside pages.
placeholder_start = '<div style="display:none" class="generated_start">generated items start</div>'
placeholder_end = '<div style="display:none" class="generated_end">generated items end</div>'
def git(*args):
    # Git integration is intentionally disabled: the early ``return`` makes
    # this a no-op stub. Re-enable by removing it and restoring the call below.
    return
    # return subprocess.check_call(['git'] + list(args))
def add_folder_to_git(repo_name):
    # Stage the given folder; currently a no-op while git() is stubbed out.
    git("add", repo_name)
def push_changes():
    # Commit the regenerated docs; push is disabled along with git() above.
    git("commit", "-am 'updated docs'")
    # git("commit", "-m 'rebuild pages' --allow-empty")
    # git("push")
def html2lxml(html):
    """Parse an HTML string into an lxml element tree."""
    tree = lxml.html.fromstring(html)
    return tree
def lxml2html(tree):
    """Serialize an lxml element tree back into an HTML string.

    Bug fix: the original passed the ``lxml`` module itself to ``tostring``
    instead of the ``tree`` argument, which raised a TypeError on every call.
    """
    return lxml.html.tostring(tree)
def html2md(html):
    """Convert an HTML string to Markdown text."""
    markdown_text = html2text.html2text(html)
    return markdown_text
def md2html(md):
    """Render Markdown source into an HTML string."""
    rendered = markdown.markdown(md)
    return rendered
def md2lxml(md):
    """Parse Markdown into an lxml tree, going through an HTML intermediate."""
    html = md2html(md)
    return html2lxml(html)
def lxml2md(tree):
    """Convert an lxml tree to Markdown, going through an HTML intermediate."""
    html = lxml2html(tree)
    return html2md(html)
def loadReadme(repo_url):
    """Fetch the raw README.md for a GitHub repository URL.

    Fix: the HTTP response object was never closed (connection leak);
    it is now closed in a finally block.
    """
    repo_url = repo_url.split("\n")[0]
    repo_name = repo_url.split("github.com/")[1]
    readme_url = "https://raw.githubusercontent.com/%s/master/README.md" % repo_name
    resp = urllib.urlopen(readme_url)
    try:
        return resp.read()
    finally:
        resp.close()
def addTitleToReadme(md, repo_url):
    """Prepend a Jekyll front-matter title to the README markdown.

    The title is the README's first <h1> text; when the README has no <h1>
    (the original crashed with AttributeError on ``.text``) or the heading
    is empty, the repository name is used instead.

    Returns (markdown_with_front_matter, title).
    """
    tree = md2lxml(md)
    heading = tree.find("h1")  # lxml .find returns None when absent
    h1 = heading.text if heading is not None else None
    package_name = repo_url.split("/")[-1]
    title = h1 or package_name
    md = "---\ntitle: %s\n---\n%s" % (title, md)
    return md, title
def loadMenu():
    """Read and parse the site menu YAML file into Python objects."""
    # Context manager guarantees the handle is closed even on read errors.
    with open(menu_file, 'r') as f:
        menu_str = f.read()
    # NOTE(review): yaml.load on a local trusted file; prefer yaml.safe_load
    # if this file could ever contain untrusted content.
    return yaml.load(menu_str)
def saveMenu(menu):
    """Serialize *menu* back to the shared site menu YAML file.

    Opening with mode "w" truncates the file, so the original
    ``os.remove(menu_file)`` was redundant -- and it crashed with OSError
    when the file did not exist yet.
    """
    with open(menu_file, "w") as f:
        f.write(yaml.dump(menu, default_flow_style=False))
def build_submenu(parent, tree, repo_name):
    """Recursively turn a heading tree into a nested menu dict.

    :param parent: heading text of this node
    :param tree: list of ``{heading_text: subtree}`` children
    :param repo_name: repository whose page the anchors point into
    NOTE(review): ``element.keys()[0]`` relies on Python 2 list-returning
    dict methods.
    """
    submenu = {}
    submenu['text'] = parent
    # Anchor follows Jekyll's slug style: lower-cased, whitespace -> dashes.
    submenu['url'] = "%s%s/#%s" %(menu_base, repo_name, re.sub('\s+', '-', parent.lower()))
    if len(tree) > 0:
        submenu['subitems'] = []
        for element in tree:
            submenu['subitems'].append(build_submenu(element.keys()[0], element.values()[0], repo_name))
    return submenu
def updateMenu(md, repo_name):
    """Rebuild the generated part of the site menu from the readme headings.

    Renders the markdown, builds a nested tree of h1..h6 headings, converts
    it into menu entries, and merges them into the 'IT-systems' submenu:
    hand-written items are kept first, generated items (marked with
    ``source: generated``) are replaced/added and sorted by title.
    NOTE(review): assumes the readme contains at least one heading --
    ``menu_item_tree[0]`` below raises IndexError otherwise. Also relies on
    Python 2 ``dict.has_key`` / list-returning ``keys()``.
    """
    # md = """# main title
## title 1
## title 2
### title 2.1
### title 2.2
#### title 2.3
## title 3
#### title 3.1
## title 4
### title 4.1"""
    available_items = ["h1", "h2", "h3", "h4", "h5", "h6"]
    menu = loadMenu()
    readme_tree = md2lxml(md)
    elements = readme_tree.findall("*")
    menu_item_tree = []
    parent_elements = []
    for element in elements:
        if element.tag.lower() in available_items:
            # Skip the front-matter title line if it leaked into the HTML.
            if element.text[:6] == 'title:':
                continue
            # Keep only ancestors with a strictly smaller heading level
            # ("h2" < "h3" compares correctly as strings for h1..h6).
            new_parent_elements = []
            for parent in parent_elements:
                if element.tag.lower() > parent.tag.lower():
                    new_parent_elements.append(parent)
            new_parent_elements.append(element)
            # Descend the tree along the ancestor chain, then attach the
            # new heading as a leaf at the current position.
            pointer = menu_item_tree
            for parent in new_parent_elements[:-1]:
                for menu_element in pointer:
                    if menu_element.has_key(parent.text):
                        pointer = menu_element[parent.text]
            new_element = {}
            new_element[element.text] = []
            pointer.append(new_element)
            parent_elements = new_parent_elements
    new_menu_item = build_submenu(menu_item_tree[0].keys()[0], menu_item_tree[0].values()[0], repo_name)
    new_menu_item['source'] = 'generated'
    # The top-level entry links to the page itself, not to an anchor.
    new_menu_item['url'] = new_menu_item['url'].split('/#')[0]
    for submenu in menu:
        if submenu['text'] == menu_base_name:
            new_it_systems_submenu_from_menu = []
            new_it_systems_submenu_from_generated = []
            added = False
            for item in submenu['subitems']:
                if item.has_key('source') and item['source'] == 'generated':
                    if item['text'] != new_menu_item['text']:
                        new_it_systems_submenu_from_generated.append(item)
                    else:
                        # Replace the previously generated entry for this repo.
                        added = True
                        new_it_systems_submenu_from_generated.append(new_menu_item)
                else:
                    new_it_systems_submenu_from_menu.append(item)
            if not added:
                new_it_systems_submenu_from_generated.append(new_menu_item)
            new_it_systems_submenu_from_generated = sorted(new_it_systems_submenu_from_generated, key=lambda k:k['text'])
            submenu['subitems'] = new_it_systems_submenu_from_menu + new_it_systems_submenu_from_generated
    saveMenu(menu)
def updateReadmePage(readme_md, repo_name):
    """Write *readme_md* to <package_location>/<repo_name>/index.md.

    Creates the repository folder on first use. Opening with mode "w"
    truncates any existing file, so no explicit removal is needed.
    Improvements: the original used a bare ``except:`` around ``os.stat``
    (hiding real errors) plus a redundant ``os.remove`` before rewriting.
    """
    package_dir = "%s/%s" %(package_location, repo_name)
    index_file = "%s/index.md" %package_dir
    if not os.path.isdir(package_dir):
        os.mkdir(package_dir)
    with open(index_file, "w") as f:
        f.write(readme_md)
def updateLandingPage(readme_title):
    """Insert *readme_title* into the generated section of the landing page.

    The landing page index.md contains two hidden placeholder divs; every
    line between them is a generated bullet-list entry. The new entry is
    added and the list kept sorted.

    Bug fix: the original appended the entry unconditionally, so running
    the script twice for the same repository duplicated its bullet point.
    """
    package_index_file = "%s/index.md" %package_location
    with open(package_index_file, "r") as f:
        lines = f.read().splitlines()
    before_placeholder = []
    between_placeholder = []
    after_placeholder = []
    current_slot = before_placeholder
    for line in lines:
        if line == placeholder_end:
            current_slot = after_placeholder
        current_slot.append(line)
        if line == placeholder_start:
            current_slot = between_placeholder
    entry = "* %s" %readme_title
    # Re-running for the same repo must not duplicate its entry.
    if entry not in between_placeholder:
        between_placeholder.append(entry)
    between_placeholder.sort()
    new_lines = before_placeholder + between_placeholder + after_placeholder
    with open(package_index_file, "w") as f:
        f.write("\n".join(new_lines))
def commitChanges():
    # Placeholder: committing is handled (and currently stubbed) by git().
    pass
def update(repo_url):
    """Regenerate doc page, site menu and landing page for one repo URL."""
    repo_url = repo_url.split("\n")[0]  # drop a trailing newline, if any
    repo_name = repo_url.split('/')[-1]
    readme_md = loadReadme(repo_url)
    readme_md, readme_title = addTitleToReadme(readme_md, repo_url)
    updateMenu(readme_md, repo_name)
    updateReadmePage(readme_md, repo_name)
    updateLandingPage(readme_title)
    commitChanges()
if __name__ == '__main__':
    # Each command-line argument is a GitHub repository URL to refresh.
    for arg in sys.argv[1:]:
        update(arg)
#git commit -m 'rebuild pages' --allow-empty
| zotya/zotya.github.io | tools/update_docs/update_package.py | Python | gpl-3.0 | 6,370 |
import uuid
import floto.decider
from floto.decider import Base
class Daemon(Base):
    """Decider daemon that launches child workflows in response to SWF signals.

    For every ``startChildWorkflowExecution`` signal received since the last
    decision it schedules a child workflow execution and, when the signal
    carries a ``decider_spec``, spawns a child decider process for it.
    """
    def __init__(self, *, domain, task_list=None, swf=None):
        super().__init__(swf=swf)
        # Default task list lets several daemons share one well-known queue.
        self.task_list = task_list or 'floto_daemon'
        self.domain = domain
    def get_decisions(self):
        # Only signals since the previous decision are considered, so each
        # signal is handled exactly once.
        signals = self.history.get_events_up_to_last_decision('WorkflowExecutionSignaled')
        self.decisions.extend(self.get_decisions_child_workflows(signals))
    def get_decisions_child_workflows(self, signal_events):
        """Map each signal event to a start-child-workflow decision (if any)."""
        decisions = []
        for signal in signal_events:
            decision = self.get_decision_child_workflow(signal)
            if decision: decisions.append(decision)
        return decisions
    def get_decision_child_workflow(self, signal_event):
        """Build the decision for one signal; also starts a child decider
        when the signal input contains a 'decider_spec'."""
        decision = None
        attributes = self.history.get_event_attributes(signal_event)
        if attributes['signalName'] == 'startChildWorkflowExecution':
            decision = self.get_decision_start_child_workflow_execution()
            if 'input' in attributes:
                input_ = floto.specs.JSONEncoder.load_string(attributes['input'])
                if 'decider_spec' in input_:
                    json_decider_spec = floto.specs.JSONEncoder.dump_object(input_['decider_spec'])
                    decider_spec = self.get_decider_spec(json_decider_spec,
                            decision.task_list['name'],
                            self.domain)
                    self.start_child_decider(decider_spec)
        return decision
    def get_decision_start_child_workflow_execution(self):
        """Create a StartChildWorkflowExecution decision with a unique id and
        a per-execution task list derived from that id."""
        # TODO: this is hardcoded, fix!
        child_workflow_type = floto.api.WorkflowType(domain='d', name='child_workflow', version='v1')
        child_workflow_id = str(uuid.uuid4())
        child_workflow_task_list = 'task_list_{}'.format(child_workflow_id)
        child_workflow = floto.decisions.StartChildWorkflowExecution()
        child_workflow.workflow_id = child_workflow_id
        child_workflow.workflow_type = child_workflow_type
        child_workflow.task_list = {'name': child_workflow_task_list}
        return child_workflow
    def start_child_decider(self, decider_spec):
        # Run the child decider in its own process so this daemon keeps polling.
        decider = floto.decider.Decider(decider_spec=decider_spec)
        decider.run(separate_process=True)
    def get_decider_spec(self, json_decider_spec, task_list, domain):
        """Deserialize the decider spec from JSON.
        NOTE(review): task_list/domain are accepted but currently not applied
        to the spec (assignments are commented out) -- confirm intent.
        """
        decider_spec = floto.specs.DeciderSpec.from_json(json_decider_spec)
        #decider_spec.task_list = task_list
        #decider_spec.domain = domain
        return decider_spec
| babbel/floto | floto/decider/daemon.py | Python | mit | 2,624 |
from .Gender import Gender
from .Profile import Profile
from .Measurement import Measurement
from .MeasurementSource import MeasurementSource
from .MeasurementType import MeasurementType
from .MeasurementUnit import MeasurementUnit
| coco19/salud-api | app/mod_profiles/models/__init__.py | Python | gpl-2.0 | 232 |
"""One can load objects from different locations.
This module provides functionality to load objects from different locations
while preserving a simple interface to the consumer.
"""
import json
import os
import sys
def identity(object_):
    """Return *object_* unchanged.

    :param object_: any value
    :return: exactly the value that was passed in
    """
    return object_
def true(_):
    """Always return :obj:`True`, ignoring the argument.

    :param _: ignored
    :return: :obj:`True`
    """
    return True
class PathLoader(object):
    """Load paths and folders from the local file system.
    The :paramref:`process <PathLoader.__init__.process>` is called with a
    :class:`path <str>` as first argument: ``process(path)``.
    """
    def __init__(self, process=identity, chooses_path=true):
        """Create a PathLoader object.
        :param process: ``process(path)`` is called with the `path` to load.
          The result of :paramref:`process` is returned to the caller. The
          default value is :func:`identity`, so the paths are returned when
          loaded.
        :param chooses_path: ``chooses_path(path)`` is called before
          :paramref:`process` and returns :obj:`True` or :obj:`False`
          depending on whether a specific path should be loaded and passed to
          :paramref:`process`.
        """
        self._process = process
        self._chooses_path = chooses_path
    def folder(self, folder):
        """Load all files from a folder recursively.
        Depending on :meth:`chooses_path` some paths may not be loaded.
        Every loaded path is processed and returned part of the returned list.
        :param str folder: the folder to load the files from
        :rtype: list
        :return: a list of the results of the processing steps of the loaded
          files
        """
        result = []
        # os.walk already recurses; the filter runs on each full file path.
        for root, _, files in os.walk(folder):
            for file in files:
                path = os.path.join(root, file)
                if self._chooses_path(path):
                    result.append(self.path(path))
        return result
    def chooses_path(self, path):
        """:return: whether the path should be loaded
        :rtype: bool
        :param str path: the path to the file to be tested
        """
        return self._chooses_path(path)
    def path(self, path):
        """load a :paramref:`path` and return the processed result
        :param str path: the path to the file to be processed
        :return: the result of processing step
        """
        return self._process(path)
    def _relative_to_absolute(self, module_location, folder):
        """:return: the absolute path for the `folder` relative to
        the module_location.
        :rtype: str
        """
        # module_location may be: a file (use its directory), a directory
        # (use it as-is), or a module name (import it and use its file's
        # directory).
        if os.path.isfile(module_location):
            path = os.path.dirname(module_location)
        elif os.path.isdir(module_location):
            path = module_location
        else:
            module_folder = os.path.dirname(module_location)
            if module_folder:
                path = module_folder
            else:
                # Not a path at all: treat it as an importable module name.
                __import__(module_location)
                module = sys.modules[module_location]
                path = os.path.dirname(module.__file__)
        absolute_path = os.path.join(path, folder)
        return absolute_path
    def relative_folder(self, module, folder):
        """Load a folder located relative to a module and return the processed
        result.
        :param str module: can be
        - a path to a folder
        - a path to a file
        - a module name
        :param str folder: the path of a folder relative to :paramref:`module`
        :return: a list of the results of the processing
        :rtype: list
        Depending on :meth:`chooses_path` some paths may not be loaded.
        Every loaded path is processed and returned part of the returned list.
        You can use :meth:`choose_paths` to find out which paths are chosen to
        load.
        """
        folder = self._relative_to_absolute(module, folder)
        return self.folder(folder)
    def relative_file(self, module, file):
        """Load a file relative to a module.
        :param str module: can be
        - a path to a folder
        - a path to a file
        - a module name
        :param str folder: the path of a folder relative to :paramref:`module`
        :return: the result of the processing
        """
        path = self._relative_to_absolute(module, file)
        return self.path(path)
    def choose_paths(self, paths):
        """:return: the paths that are chosen by :meth:`chooses_path`
        :rtype: list
        """
        return [path for path in paths if self._chooses_path(path)]
    def example(self, relative_path):
        """Load an example from the knitting pattern examples.
        :param str relative_path: the path to load
        :return: the result of the processing
        You can use :meth:`knittingpattern.Loader.PathLoader.examples`
        to find out the paths of all examples.
        """
        example_path = os.path.join("examples", relative_path)
        return self.relative_file(__file__, example_path)
    def examples(self):
        """Load all examples form the examples folder of this packge.
        :return: a list of processed examples
        :rtype: list
        Depending on :meth:`chooses_path` some paths may not be loaded.
        Every loaded path is processed and returned part of the returned list.
        """
        return self.relative_folder(__file__, "examples")
class ContentLoader(PathLoader):
    """Load the contents of files and resources as strings.

    The :paramref:`process <PathLoader.__init__.process>` is called with a
    :class:`string <str>` as first argument: ``process(string)``.
    """
    def string(self, string):
        """Process a string directly.
        :param str string: the content to process
        :return: the result of the processing step
        """
        return self._process(string)
    def file(self, file):
        """Process the content of a file-like object.
        :param file: an object supporting ``read()``
        :return: the result of the processing step
        """
        return self.string(file.read())
    def path(self, path):
        """Process the content of the file at *path*.
        :param str path: a path on the local file system
        :return: the result of the processing step
        """
        with open(path) as opened_file:
            return self.file(opened_file)
    def url(self, url, encoding="UTF-8"):
        """Retrieve *url* and process its decoded content.
        :param str url: the url to retrieve the content from
        :param str encoding: encoding of the retrieved bytes (default UTF-8)
        :return: the result of the processing step
        """
        import urllib.request
        with urllib.request.urlopen(url) as response:
            raw_bytes = response.read()
        return self.string(raw_bytes.decode(encoding))
class JSONLoader(ContentLoader):
    """Load and process JSON from various locations.

    The :paramref:`process <PathLoader.__init__.process>` is called with an
    :class:`object` as first argument: ``process(object)``.
    """
    def object(self, object_):
        """Process an already parsed object.
        :param object_: the object to be processed
        :return: the result of the processing step
        """
        return self._process(object_)
    def string(self, string):
        """Parse JSON from *string* and process the resulting object.
        :param str string: the string to load the JSON from
        :return: the result of the processing step
        """
        parsed = json.loads(string)
        return self.object(parsed)
__all__ = ["JSONLoader", "ContentLoader", "PathLoader", "true", "identity"]
| AllYarnsAreBeautiful/knittingpattern | knittingpattern/Loader.py | Python | lgpl-3.0 | 7,981 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hashlib
from datetime import datetime
from urllib import urlencode
from odoo import api, fields, models
class Users(models.Model):
    """res.users extension adding forum karma, badges and e-mail validation."""
    _inherit = 'res.users'
    def __init__(self, pool, cr):
        # Let users edit these extra profile fields on their own record.
        init_res = super(Users, self).__init__(pool, cr)
        type(self).SELF_WRITEABLE_FIELDS = list(
            set(
                self.SELF_WRITEABLE_FIELDS +
                ['country_id', 'city', 'website', 'website_description', 'website_published']))
        return init_res
    create_date = fields.Datetime('Create Date', readonly=True, copy=False, index=True)
    karma = fields.Integer('Karma', default=0)
    badge_ids = fields.One2many('gamification.badge.user', 'user_id', string='Badges', copy=False)
    gold_badge = fields.Integer('Gold badges count', compute="_get_user_badge_level")
    silver_badge = fields.Integer('Silver badges count', compute="_get_user_badge_level")
    bronze_badge = fields.Integer('Bronze badges count', compute="_get_user_badge_level")
    forum_waiting_posts_count = fields.Integer('Waiting post', compute="_get_user_waiting_post")
    @api.multi
    @api.depends('badge_ids')
    def _get_user_badge_level(self):
        """ Return total badge per level of users
        TDE CLEANME: shouldn't check type is forum ? """
        # Zero-init so users without badges still get values, then fill the
        # counts from one grouped SQL query over all requested users.
        for user in self:
            user.gold_badge = 0
            user.silver_badge = 0
            user.bronze_badge = 0
        self.env.cr.execute("""
            SELECT bu.user_id, b.level, count(1)
            FROM gamification_badge_user bu, gamification_badge b
            WHERE bu.user_id IN %s
            AND bu.badge_id = b.id
            AND b.level IS NOT NULL
            GROUP BY bu.user_id, b.level
            ORDER BY bu.user_id;
        """, [tuple(self.ids)])
        for (user_id, level, count) in self.env.cr.fetchall():
            # levels are gold, silver, bronze but fields have _badge postfix
            self.browse(user_id)['{}_badge'.format(level)] = count
    @api.multi
    def _get_user_waiting_post(self):
        # Number of this user's own pending top-level forum posts.
        for user in self:
            Post = self.env['forum.post']
            domain = [('parent_id', '=', False), ('state', '=', 'pending'), ('create_uid', '=', user.id)]
            user.forum_waiting_posts_count = Post.search_count(domain)
    @api.model
    def _generate_forum_token(self, user_id, email):
        """Return a token for email validation. This token is valid for the day
        and is a hash based on a (secret) uuid generated by the forum module,
        the user_id, the email and currently the day (to be updated if necessary). """
        forum_uuid = self.env['ir.config_parameter'].sudo().get_param('website_forum.uuid')
        # Midnight timestamp makes the token expire at the end of the day.
        return hashlib.sha256('%s-%s-%s-%s' % (
            datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
            forum_uuid,
            user_id,
            email)).hexdigest()
    @api.one
    def send_forum_validation_email(self, forum_id=None):
        """Send the e-mail containing the validation link; False if no e-mail."""
        if not self.email:
            return False
        token = self._generate_forum_token(self.id, self.email)
        activation_template = self.env.ref('website_forum.validation_email')
        if activation_template:
            params = {
                'token': token,
                'id': self.id,
                'email': self.email}
            if forum_id:
                params['forum_id'] = forum_id
            base_url = self.env['ir.config_parameter'].get_param('web.base.url')
            token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
            activation_template.sudo().with_context(token_url=token_url).send_mail(self.id, force_send=True)
        return True
    @api.one
    def process_forum_validation_token(self, token, email, forum_id=None, context=None):
        """Validate the token; on success bootstrap the user's karma so they
        can ask a question (and survive two downvotes). Only applies while
        karma is still 0, i.e. the account was never validated."""
        validation_token = self._generate_forum_token(self.id, email)
        if token == validation_token and self.karma == 0:
            karma = 3
            forum = None
            if forum_id:
                forum = self.env['forum.forum'].browse(forum_id)
            else:
                forum_ids = self.env['forum.forum'].search([], limit=1)
                if forum_ids:
                    forum = forum_ids[0]
            if forum:
                # karma gained: karma to ask a question and have 2 downvotes
                karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
            return self.write({'karma': karma})
        return False
    @api.multi
    def add_karma(self, karma):
        # karma may be negative; this is the single entry point for all gains/losses.
        for user in self:
            user.karma += karma
        return True
    # Wrapper for call_kw with inherits
    @api.multi
    def open_website_url(self):
        return self.mapped('partner_id').open_website_url()
| chienlieu2017/it_management | odoo/addons/website_forum/models/res_users.py | Python | gpl-3.0 | 4,824 |
"""Creating a one-to-many relationship for notifications
Revision ID: 4dc5ddd111b8
Revises: 4c8915e461b3
Create Date: 2015-07-24 15:02:04.398262
"""
# revision identifiers, used by Alembic.
revision = '4dc5ddd111b8'
down_revision = '4c8915e461b3'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the certificate <-> notification many-to-many join table.
    Both foreign keys cascade on delete, so removing a certificate or a
    notification also removes its association rows.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('certificate_notification_associations',
    sa.Column('notification_id', sa.Integer(), nullable=True),
    sa.Column('certificate_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ondelete='cascade'),
    sa.ForeignKeyConstraint(['notification_id'], ['notifications.id'], ondelete='cascade')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the association table created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('certificate_notification_associations')
    ### end Alembic commands ###
| stinkydan/lemur | lemur/migrations/versions/4dc5ddd111b8_.py | Python | apache-2.0 | 960 |
score1 = 0
score2 = 0
tiecount = 0
player_1 = raw_input("Player 1, what's your name?")
player_2 = raw_input("Player 2?")
import random
def shuffled_deck():
deck = range(2,53)
random.shuffle(deck)
return deck
deck = shuffled_deck()
def player_turn(player_name):
card = deck.pop()
print player_name + " drew a " + str(card)
return str(card)
while deck:
if player_turn(player_1) == player_turn(player_2):
print "It's a tie!"
tiecount = tiecount + 1
elif player_turn(player_1) > player_turn(player_2):
score1 = score1 + 1
print "Player 1 Wins"
elif player_turn(player_1) < player_turn(player_2):
score2 = score2 + 1
print "It's a tie!"
print score1
print score2
print tiecount | bensk/CS9 | _site/Code Examples/April12WarWork.py | Python | mit | 765 |
# Metric Space from Wordnet
# Main output function: Guess Universe and provide distance
from nltk.corpus import wordnet
def find_synonyms_antonyms(word):
    """Collect WordNet synonym and antonym lemma names for *word*.
    Prints the de-duplicated sets, but returns the raw (possibly
    duplicated) lists as a ``(synonyms, antonyms)`` tuple.
    """
    synonyms = []
    antonyms = []
    for syn in wordnet.synsets(word):
        for l in syn.lemmas():
            synonyms.append(l.name())
            if l.antonyms():
                # Only the first antonym of each lemma is recorded.
                antonyms.append(l.antonyms()[0].name())
    print(set(synonyms))
    print(set(antonyms))
    #synonyms = wordnet.synsets(word)
    #lemmas = set(chain.from_iterable([w.lemma_names() for w in synonyms]))
    #print synonyms
    #print lemmas
    return synonyms, antonyms
def find_distance(w1,w2):
    """Return the WordNet path similarity between the first noun senses
    of *w1* and *w2* (1.0 for identical synsets, smaller when further
    apart, None when no path exists).

    The original body was an unfinished stub that referenced an undefined
    name ``wn`` and built an empty synset id (``'.n.01'``); this completes
    the evidently intended lookup using the module-level ``wordnet`` import.
    """
    s1 = wordnet.synset(w1 + '.n.01')
    s2 = wordnet.synset(w2 + '.n.01')
    return s1.path_similarity(s2)
def guess_universe(word):
    """Guess the 'universe' of a word: currently just its synonym list."""
    syn, ant = find_synonyms_antonyms(word)
    return syn
    # guess universe based on personalized knowledge
def main():
    # Demo entry point (Python 2 print statement).
    univ = guess_universe('river')
    print univ
if __name__ == "__main__":
    main()
'''
keys = ['river', 'pond', 'sea', 'lake', '']
d = dict.fromkeys(keys, None)
d['river'] = ['icy','passable', 'fresh', 'swollen', 'fluvial', 'freshwater', 'fish', 'water plants', 'frozen', 'leading to the sea', 'pollution', 'bank', 'sand', 'lots of water','waves','long','meadering','winding', 'Allegany','reflections','natural']
d['pond'] =['icy','calm','fish', 'stagnant', 'water plants', 'freshwater', 'pollution', 'frozen', 'eutrophication', 'lots of water','waves','not too big','reflections','isolated','natural', 'schenley park', 'static', 'natural']
#d['']=['I like it', 'it is everywhere', 'beautiful','I did not see it yesterday','I want to go', 'strange']
d['both'] = list(set(d['pond'])&set(d['river']))
d['only river'] = d['river'] - d['both']
d['only pond'] = d['pond'] - d['both']
def generator(word):
def createGenerator(l):
mylist = l
for i in mylist:
yield i
mygenerator = createGenerator(d[word]) # create a generator
for i in mygenerator:
'''
| zhongzho/starter-python-bot | bot/distance_WordNet.py | Python | mit | 1,944 |
# Copyright 2008, Red Hat, Inc
# Luke Macken <lmacken@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import func_module
import sub_process
class SysctlModule(func_module.FuncModule):
    """func minion module wrapping /sbin/sysctl to read/write kernel params."""
    version = "0.0.1"
    description = "Configure kernel parameters at runtime"
    def __run(self, cmd):
        # Run a /sbin/sysctl command line and return stdout as a list of lines.
        cmd = sub_process.Popen(cmd.split(), stdout=sub_process.PIPE,
                                stderr=sub_process.PIPE, shell=False,
                                close_fds=True)
        return [line for line in cmd.communicate()[0].strip().split('\n')]
    def list(self):
        # All kernel parameters, one "key = value" line per entry.
        return self.__run("/sbin/sysctl -a")
    def get(self, name):
        # Value only (-n suppresses the key name), e.g. name='kernel.ostype'.
        return self.__run("/sbin/sysctl -n %s" % name)
    def set(self, name, value):
        # Write a parameter at runtime (-w key=value).
        return self.__run("/sbin/sysctl -w %s=%s" % (name, value))
    def grep(self, word):
        """
        Grep info from sysctl
        """
        # NOTE(review): keys the result dict by the bound method self.list --
        # presumably the func 'findout' convention; confirm before changing.
        results = {self.list:[]}
        sys_res = self.list()#the ist fo sysctl
        for res in sys_res:
            if res.lower().find(word)!=-1:
                results[self.list].append(res)
        return results
    grep = func_module.findout(grep)
    def register_method_args(self):
        """
        Implementing the method argument getter
        """
        return {
                'list':{
                    'args':{},
                    'description':"Display all values currently available."
                    },
                'get':{
                    'args':{
                        'name':{
                            'type':'string',
                            'optional':False,
                            'description':"The name of a key to read from. An example is kernel.ostype"
                            }
                        },
                    'description':"Use this option to disable printing of the key name when printing values"
                    },
                'set':{
                    'args':{
                        'name':{
                            'type':'string',
                            'optional':False,
                            'description':"The name of a key to read from. An example is kernel.ostype"
                            },
                        'value':{
                            'type':'string',
                            'optional':False,
                            'description':"The name value to be set."
                            }
                        },
                    'description':"Use this option when you want to change a sysctl setting"
                    }
                }
| dockerera/func | func/minion/modules/sysctl.py | Python | gpl-2.0 | 2,855 |
import itertools
from sympy.core.sympify import _sympify
from sympy.core.compatibility import default_sort_key
from sympy import Expr, Add, Mul, S, Integral, Eq, Sum, Symbol, Dummy, Basic
from sympy.core.evaluate import global_evaluate
from sympy.stats import variance, covariance
from sympy.stats.rv import RandomSymbol, probability, expectation
__all__ = ['Probability', 'Expectation', 'Variance', 'Covariance']
class Probability(Expr):
    """
    Symbolic expression for the probability.
    Examples
    ========
    >>> from sympy.stats import Probability, Normal
    >>> from sympy import Integral
    >>> X = Normal("X", 0, 1)
    >>> prob = Probability(X > 1)
    >>> prob
    Probability(X > 1)
    Integral representation:
    >>> prob.rewrite(Integral)
    Integral(sqrt(2)*exp(-_z**2/2)/(2*sqrt(pi)), (_z, 1, oo))
    Evaluation of the integral:
    >>> prob.evaluate_integral()
    sqrt(2)*(-sqrt(2)*sqrt(pi)*erf(sqrt(2)/2) + sqrt(2)*sqrt(pi))/(4*sqrt(pi))
    """
    def __new__(cls, prob, condition=None, **kwargs):
        # An optional conditioning expression becomes a second argument and
        # is also cached on the instance for the rewrite methods.
        prob = _sympify(prob)
        if condition is None:
            obj = Expr.__new__(cls, prob)
        else:
            condition = _sympify(condition)
            obj = Expr.__new__(cls, prob, condition)
        obj._condition = condition
        return obj
    def _eval_rewrite_as_Integral(self, arg, condition=None):
        # Delegate to sympy.stats.probability without evaluating it.
        return probability(arg, condition, evaluate=False)
    def _eval_rewrite_as_Sum(self, arg, condition=None):
        # Discrete case shares the Integral rewrite machinery.
        return self.rewrite(Integral)
    def evaluate_integral(self):
        return self.rewrite(Integral).doit()
class Expectation(Expr):
    """
    Symbolic expression for the expectation.
    Examples
    ========
    >>> from sympy.stats import Expectation, Normal, Probability
    >>> from sympy import symbols, Integral
    >>> mu = symbols("mu")
    >>> sigma = symbols("sigma", positive=True)
    >>> X = Normal("X", mu, sigma)
    >>> Expectation(X)
    Expectation(X)
    >>> Expectation(X).evaluate_integral().simplify()
    mu
    To get the integral expression of the expectation:
    >>> Expectation(X).rewrite(Integral)
    Integral(sqrt(2)*X*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
    The same integral expression, in more abstract terms:
    >>> Expectation(X).rewrite(Probability)
    Integral(x*Probability(Eq(X, x)), (x, -oo, oo))
    This class is aware of some properties of the expectation:
    >>> from sympy.abc import a
    >>> Expectation(a*X)
    Expectation(a*X)
    >>> Y = Normal("Y", 0, 1)
    >>> Expectation(X + Y)
    Expectation(X + Y)
    To expand the ``Expectation`` into its expression, use ``doit()``:
    >>> Expectation(X + Y).doit()
    Expectation(X) + Expectation(Y)
    >>> Expectation(a*X + Y).doit()
    a*Expectation(X) + Expectation(Y)
    >>> Expectation(a*X + Y)
    Expectation(a*X + Y)
    """
    def __new__(cls, expr, condition=None, **kwargs):
        expr = _sympify(expr)
        if condition is None:
            # Expectation of a deterministic expression is the expression itself.
            if not expr.has(RandomSymbol):
                return expr
            obj = Expr.__new__(cls, expr)
        else:
            condition = _sympify(condition)
            obj = Expr.__new__(cls, expr, condition)
        obj._condition = condition
        return obj
    def doit(self, **hints):
        """Expand by linearity: E[aX + Y] -> a*E[X] + E[Y]."""
        expr = self.args[0]
        condition = self._condition
        if not expr.has(RandomSymbol):
            return expr
        if isinstance(expr, Add):
            return Add(*[Expectation(a, condition=condition).doit() for a in expr.args])
        elif isinstance(expr, Mul):
            rv = []
            nonrv = []
            for a in expr.args:
                if isinstance(a, RandomSymbol) or a.has(RandomSymbol):
                    rv.append(a)
                else:
                    nonrv.append(a)
            # Deterministic factors are pulled out of the expectation.
            return Mul(*nonrv)*Expectation(Mul(*rv), condition=condition)
        return self
    def _eval_rewrite_as_Probability(self, arg, condition=None):
        rvs = arg.atoms(RandomSymbol)
        if len(rvs) > 1:
            raise NotImplementedError()
        if len(rvs) == 0:
            return arg
        rv = rvs.pop()
        if rv.pspace is None:
            raise ValueError("Probability space not known")
        symbol = rv.symbol
        # Pick a fresh bound symbol that cannot clash with the random symbol.
        if symbol.name[0].isupper():
            symbol = Symbol(symbol.name.lower())
        else:
            symbol = Symbol(symbol.name + "_1")
        if rv.pspace.is_Continuous:
            return Integral(arg.replace(rv, symbol)*Probability(Eq(rv, symbol), condition), (symbol, rv.pspace.domain.set.inf, rv.pspace.domain.set.sup))
        else:
            if rv.pspace.is_Finite:
                raise NotImplementedError
            else:
                # Bug fix: the upper summation bound previously read
                # ``rv.pspace.set.sup``; use the domain set's sup so it
                # matches the lower bound and the continuous branch above.
                return Sum(arg.replace(rv, symbol)*Probability(Eq(rv, symbol), condition), (symbol, rv.pspace.domain.set.inf, rv.pspace.domain.set.sup))
    def _eval_rewrite_as_Integral(self, arg, condition=None):
        return expectation(arg, condition=condition, evaluate=False)
    def _eval_rewrite_as_Sum(self, arg, condition=None):
        return self.rewrite(Integral)
    def evaluate_integral(self):
        return self.rewrite(Integral).doit()
class Variance(Expr):
    """
    Symbolic expression for the variance.
    Examples
    ========
    >>> from sympy import symbols, Integral
    >>> from sympy.stats import Normal, Expectation, Variance, Probability
    >>> mu = symbols("mu", positive=True)
    >>> sigma = symbols("sigma", positive=True)
    >>> X = Normal("X", mu, sigma)
    >>> Variance(X)
    Variance(X)
    >>> Variance(X).evaluate_integral()
    sigma**2
    Integral representation of the underlying calculations:
    >>> Variance(X).rewrite(Integral)
    Integral(sqrt(2)*(X - Integral(sqrt(2)*X*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo)))**2*exp(-(X - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (X, -oo, oo))
    Integral representation, without expanding the PDF:
    >>> Variance(X).rewrite(Probability)
    -Integral(x*Probability(Eq(X, x)), (x, -oo, oo))**2 + Integral(x**2*Probability(Eq(X, x)), (x, -oo, oo))
    Rewrite the variance in terms of the expectation
    >>> Variance(X).rewrite(Expectation)
    -Expectation(X)**2 + Expectation(X**2)
    Some transformations based on the properties of the variance may happen:
    >>> from sympy.abc import a
    >>> Y = Normal("Y", 0, 1)
    >>> Variance(a*X)
    Variance(a*X)
    To expand the variance in its expression, use ``doit()``:
    >>> Variance(a*X).doit()
    a**2*Variance(X)
    >>> Variance(X + Y)
    Variance(X + Y)
    >>> Variance(X + Y).doit()
    2*Covariance(X, Y) + Variance(X) + Variance(Y)
    """
    def __new__(cls, arg, condition=None, **kwargs):
        # Optional condition is stored both as an extra arg and on the instance.
        arg = _sympify(arg)
        if condition is None:
            obj = Expr.__new__(cls, arg)
        else:
            condition = _sympify(condition)
            obj = Expr.__new__(cls, arg, condition)
        obj._condition = condition
        return obj
    def doit(self, **hints):
        """Expand using Var[aX + Y] = a**2 Var[X] + Var[Y] + 2 Cov[X, Y]."""
        arg = self.args[0]
        condition = self._condition
        # A deterministic expression has zero variance.
        if not arg.has(RandomSymbol):
            return S.Zero
        if isinstance(arg, RandomSymbol):
            return self
        elif isinstance(arg, Add):
            # Sum rule: variances of random addends plus pairwise covariances.
            rv = []
            for a in arg.args:
                if a.has(RandomSymbol):
                    rv.append(a)
            variances = Add(*map(lambda xv: Variance(xv, condition).doit(), rv))
            map_to_covar = lambda x: 2*Covariance(*x, condition=condition).doit()
            covariances = Add(*map(map_to_covar, itertools.combinations(rv, 2)))
            return variances + covariances
        elif isinstance(arg, Mul):
            # Deterministic factors come out squared: Var[aX] = a**2 Var[X].
            nonrv = []
            rv = []
            for a in arg.args:
                if a.has(RandomSymbol):
                    rv.append(a)
                else:
                    nonrv.append(a**2)
            if len(rv) == 0:
                return S.Zero
            return Mul(*nonrv)*Variance(Mul(*rv), condition)
        # this expression contains a RandomSymbol somehow:
        return self
    def _eval_rewrite_as_Expectation(self, arg, condition=None):
        # Var[X] = E[X**2] - E[X]**2
        e1 = Expectation(arg**2, condition)
        e2 = Expectation(arg, condition)**2
        return e1 - e2
    def _eval_rewrite_as_Probability(self, arg, condition=None):
        return self.rewrite(Expectation).rewrite(Probability)
    def _eval_rewrite_as_Integral(self, arg, condition=None):
        return variance(self.args[0], self._condition, evaluate=False)
    def _eval_rewrite_as_Sum(self, arg, condition=None):
        return self.rewrite(Integral)
    def evaluate_integral(self):
        return self.rewrite(Integral).doit()
class Covariance(Expr):
    """
    Symbolic expression for the covariance.
    Examples
    ========
    >>> from sympy.stats import Covariance
    >>> from sympy.stats import Normal
    >>> X = Normal("X", 3, 2)
    >>> Y = Normal("Y", 0, 1)
    >>> Z = Normal("Z", 0, 1)
    >>> W = Normal("W", 0, 1)
    >>> cexpr = Covariance(X, Y)
    >>> cexpr
    Covariance(X, Y)
    Evaluate the covariance, `X` and `Y` are independent,
    therefore zero is the result:
    >>> cexpr.evaluate_integral()
    0
    Rewrite the covariance expression in terms of expectations:
    >>> from sympy.stats import Expectation
    >>> cexpr.rewrite(Expectation)
    Expectation(X*Y) - Expectation(X)*Expectation(Y)
    In order to expand the argument, use ``doit()``:
    >>> from sympy.abc import a, b, c, d
    >>> Covariance(a*X + b*Y, c*Z + d*W)
    Covariance(a*X + b*Y, c*Z + d*W)
    >>> Covariance(a*X + b*Y, c*Z + d*W).doit()
    a*c*Covariance(X, Z) + a*d*Covariance(W, X) + b*c*Covariance(Y, Z) + b*d*Covariance(W, Y)
    This class is aware of some properties of the covariance:
    >>> Covariance(X, X).doit()
    Variance(X)
    >>> Covariance(a*X, b*Y).doit()
    a*b*Covariance(X, Y)
    """
    def __new__(cls, arg1, arg2, condition=None, **kwargs):
        arg1 = _sympify(arg1)
        arg2 = _sympify(arg2)
        # Covariance is symmetric, so the arguments are canonically sorted
        # (unless evaluation is explicitly disabled).
        if kwargs.pop('evaluate', global_evaluate[0]):
            arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
        if condition is None:
            obj = Expr.__new__(cls, arg1, arg2)
        else:
            condition = _sympify(condition)
            obj = Expr.__new__(cls, arg1, arg2, condition)
        obj._condition = condition
        return obj
    def doit(self, **hints):
        """Expand by bilinearity into covariances of random atoms."""
        arg1 = self.args[0]
        arg2 = self.args[1]
        condition = self._condition
        if arg1 == arg2:
            return Variance(arg1, condition).doit()
        # Covariance with a deterministic expression is zero.
        if not arg1.has(RandomSymbol):
            return S.Zero
        if not arg2.has(RandomSymbol):
            return S.Zero
        arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
        if isinstance(arg1, RandomSymbol) and isinstance(arg2, RandomSymbol):
            return Covariance(arg1, arg2, condition)
        # Cov[sum_i a_i X_i, sum_j b_j Y_j] = sum_ij a_i b_j Cov[X_i, Y_j]
        coeff_rv_list1 = self._expand_single_argument(arg1.expand())
        coeff_rv_list2 = self._expand_single_argument(arg2.expand())
        addends = [a*b*Covariance(*sorted([r1, r2], key=default_sort_key), condition=condition)
                   for (a, r1) in coeff_rv_list1 for (b, r2) in coeff_rv_list2]
        return Add(*addends)
    @classmethod
    def _expand_single_argument(cls, expr):
        # return (coefficient, random_symbol) pairs:
        if isinstance(expr, RandomSymbol):
            return [(S.One, expr)]
        elif isinstance(expr, Add):
            outval = []
            for a in expr.args:
                if isinstance(a, Mul):
                    outval.append(cls._get_mul_nonrv_rv_tuple(a))
                elif isinstance(a, RandomSymbol):
                    outval.append((S.One, a))
            return outval
        elif isinstance(expr, Mul):
            return [cls._get_mul_nonrv_rv_tuple(expr)]
        elif expr.has(RandomSymbol):
            return [(S.One, expr)]
        # NOTE(review): falls through to an implicit None for expressions
        # without RandomSymbol -- doit()'s early returns prevent that path.
    @classmethod
    def _get_mul_nonrv_rv_tuple(cls, m):
        # Split a product into (deterministic factor, random factor).
        rv = []
        nonrv = []
        for a in m.args:
            if a.has(RandomSymbol):
                rv.append(a)
            else:
                nonrv.append(a)
        return (Mul(*nonrv), Mul(*rv))
    def _eval_rewrite_as_Expectation(self, arg1, arg2, condition=None):
        # Cov[X, Y] = E[XY] - E[X]E[Y]
        e1 = Expectation(arg1*arg2, condition)
        e2 = Expectation(arg1, condition)*Expectation(arg2, condition)
        return e1 - e2
    def _eval_rewrite_as_Probability(self, arg1, arg2, condition=None):
        return self.rewrite(Expectation).rewrite(Probability)
    def _eval_rewrite_as_Integral(self, arg1, arg2, condition=None):
        return covariance(self.args[0], self.args[1], self._condition, evaluate=False)
    def _eval_rewrite_as_Sum(self, arg1, arg2, condition=None):
        return self.rewrite(Integral)
    def evaluate_integral(self):
        return self.rewrite(Integral).doit()
| wxgeo/geophar | wxgeometrie/sympy/stats/symbolic_probability.py | Python | gpl-2.0 | 12,946 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.