repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
pydawan/protetores_bucais | protetores_bucais/apps/destaques/migrations/0001_initial.py | Python | mit | 843 | 0.004745 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Destaque',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created= | True, primary_key=True)),
('titulo', models.CharField(max_length=100, verbose_name=b'T\xc3\xadtulo')),
('descricao', models.TextField(verbose_name=b'Descri\xc3\xa7\xc3\xa3o', blank=True)),
('imagem', models.ImageField(upload_to=b'imagens/destaques', blank=True)),
],
options={
'verbose_name': 'Destaque',
'verbose_name_plural': 'Destaques',
},
),
| ]
|
krafczyk/spack | var/spack/repos/builtin/packages/gtkmm/package.py | Python | lgpl-2.1 | 2,394 | 0.000835 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gtkmm(AutotoolsPackage):
"""Gtkmm is the official C++ interface for the popular GUI library GTK+."""
homepage = "https://www.gtkmm.org/en/"
url = "https://ftp.acc.umu.se/pub/GNOME/sources/gtkmm/2.16/gtkmm-2.16.0.tar.gz"
version('2.19.7', '2afc018e5b15cde293cd2d21db9b6a55')
| version('2.19.6', 'fb140e82e583620defe0d70bfe7eefd7')
version('2.19.4', '60006a23306487938dfe0e4b17e3fa46')
version('2.19.2', 'dc208575a24e8d5265af2fd59c08f3d8')
version('2.17.11', '2326ff83439aac83721ed4694acf14e5')
version('2.17.1', '19358644e5e620ad738658be2cb6d739')
versi | on('2.16.0', 'de178c2a6f23eda0b6a8bfb0219e2e1c')
version('2.4.11', 'a339958bc4ab7f74201b312bd3562d46')
depends_on('glibmm')
depends_on('atk')
depends_on('gtkplus')
depends_on('pangomm')
depends_on('cairomm')
def url_for_version(self, version):
"""Handle glib's version-based custom URLs."""
url = "https://ftp.acc.umu.se/pub/GNOME/sources/gtkmm"
ext = '.tar.gz' if version < Version('3.1.0') else '.tar.xz'
return url + "/%s/gtkmm-%s%s" % (version.up_to(2), version, ext)
|
teoreteetik/api-snippets | monitor/events/list-get-example-sourceipaddress-filter/list-get-example-sourceipaddress-filter.6.x.py | Python | mit | 552 | 0 | # Download the | Python helper library from twilio.com/docs/python/install
from datetime import datetime
import pytz
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account | _sid, auth_token)
events = client.monitor.events.list(
source_ip_address="104.14.155.29",
start_date=datetime(2015, 4, 25, tzinfo=pytz.UTC),
end_date=datetime(2015, 4, 25, 23, 59, 59, tzinfo=pytz.UTC)
)
for e in events:
print(e.description)
|
Taapat/enigma2-openpli-vuplus | lib/python/Plugins/SystemPlugins/AnimationSetup/plugin.py | Python | gpl-2.0 | 9,065 | 0.030667 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigNumber, ConfigSelection, ConfigSelectionNumber, getConfigListEntry
from Plugins.Plugin import PluginDescriptor
from enigma import setAnimation_current, setAnimation_speed, setAnimation_current_listbox
g_animation_paused = False
g_orig_show = None
g_orig_doClose = None
config.misc.window_animation_default = ConfigNumber(default = 6)
config.misc.window_animation_speed = ConfigSelectionNumber(1, 30, 1, default = 20)
config.misc.listbox_animation_default = ConfigSelection(default = "0", choices = [("0", _("Disable")), ("1", _("Enable")), ("2", _("Same behavior as current animation"))])
class AnimationSetupConfig(ConfigListScreen, Screen):
skin = """
<screen position="center,center" size="600,140" title="Animation Settings">
<widget name="config" position="0,0" size="600,100" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/buttons/red.png" position="0,100" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,100" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,100" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,100" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget source="key_green" render="Label" position="140,100" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget source="key_yellow" render="Label" position="280,100" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
</screen>
"""
def __init__(self, session):
self.session = session
self.entrylist = []
Screen.__init__(self, session)
ConfigListScreen.__init__(self, self.entrylist)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions",], {
"ok" : self.keyGreen,
"green" : self.keyGreen,
"yellow" : self.keyYellow,
"red" : self.keyRed,
"cancel" : self.keyRed,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] | = StaticText(_("Default"))
self.makeConfigList()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_('Animation Setup'))
def keyGreen(self):
co | nfig.misc.window_animation_speed.save()
setAnimation_speed(int(config.misc.window_animation_speed.value))
setAnimation_speed(int(config.misc.window_animation_speed.value))
config.misc.listbox_animation_default.save()
setAnimation_current_listbox(int(config.misc.listbox_animation_default.value))
self.close()
def keyRed(self):
config.misc.window_animation_speed.cancel()
config.misc.listbox_animation_default.cancel()
self.close()
def keyYellow(self):
config.misc.window_animation_speed.value = 20
config.misc.listbox_animation_default.value = "0"
self.makeConfigList()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def makeConfigList(self):
self.entrylist = []
self.entrylist.append(getConfigListEntry(_("Animation Speed"), config.misc.window_animation_speed))
self.entrylist.append(getConfigListEntry(_("Enable Focus Animation"), config.misc.listbox_animation_default))
self["config"].list = self.entrylist
self["config"].l.setList(self.entrylist)
class AnimationSetupScreen(Screen):
animationSetupItems = [
{"idx":0, "name":_("Disable Animations")},
{"idx":1, "name":_("Simple fade")},
{"idx":2, "name":_("Grow drop")},
{"idx":3, "name":_("Grow from left")},
{"idx":4, "name":_("Popup")},
{"idx":5, "name":_("Slide drop")},
{"idx":6, "name":_("Slide left to right")},
{"idx":7, "name":_("Slide top to bottom")},
{"idx":8, "name":_("Stripes")},
]
skin = """
<screen name="AnimationSetup" position="center,center" size="580,400" title="Animation Setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" \
font="Regular;20" halign="center" valign="center" transparent="1" />
<widget name="list" position="10,60" size="560,364" scrollbarMode="showOnDemand" />
<widget source="introduction" render="Label" position="0,370" size="560,40" \
font="Regular;20" valign="center" transparent="1" />
</screen>
"""
def __init__(self, session):
self.skin = AnimationSetupScreen.skin
Screen.__init__(self, session)
self.animationList = []
self["introduction"] = StaticText(_("* current animation"))
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText(_("Settings"))
self["key_blue"] = StaticText(_("Preview"))
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.keyclose,
"save": self.ok,
"ok" : self.ok,
"yellow": self.config,
"blue": self.preview
}, -3)
self["list"] = MenuList(self.animationList)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
l = []
for x in self.animationSetupItems:
key = x.get("idx", 0)
name = x.get("name", "??")
if key == config.misc.window_animation_default.value:
name = "* %s" % (name)
l.append( (name, key) )
self["list"].setList(l)
def ok(self):
current = self["list"].getCurrent()
if current:
key = current[1]
config.misc.window_animation_default.value = key
config.misc.window_animation_default.save()
setAnimation_current(key)
setAnimation_current_listbox(int(config.misc.listbox_animation_default.value))
self.close()
def keyclose(self):
setAnimation_current(config.misc.window_animation_default.value)
setAnimation_speed(int(config.misc.window_animation_speed.value))
setAnimation_current_listbox(int(config.misc.listbox_animation_default.value))
self.close()
def config(self):
self.session.open(AnimationSetupConfig)
def preview(self):
current = self["list"].getCurrent()
if current:
global g_animation_paused
tmp = g_animation_paused
g_animation_paused = False
setAnimation_current(current[1])
self.session.open(MessageBox, current[0], MessageBox.TYPE_INFO, timeout=3)
g_animation_paused = tmp
def checkAttrib(self, paused):
if g_animation_paused is paused:
try:
for (attr, value) in self.skinAttributes:
if attr == "animationPaused" and value in ("1", "on"):
return True
except:
pass
return False
def screen_show(self):
global g_animation_paused
if g_animation_paused:
setAnimation_current(0)
g_orig_show(self)
if checkAttrib(self, False):
g_animation_paused = True
def screen_doClose(self):
global g_animation_paused
if checkAttrib(self, True):
g_animation_paused = False
setAnimation_current(config.misc.window_animation_default.value)
g_orig_doClose(self)
def animationSetupMain(session, **kwargs):
session.open(AnimationSetupScreen)
def startAnimationSetup(menuid):
if menuid == "system":
return [( _("Animations" |
sfu-fas/coursys | ra/migrations/0017_add_processor_field.py | Python | gpl-3.0 | 636 | 0.001572 | # Generated by Django 2.2.15 on 2021-10-07 12:02
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import ra.models
class Migration(migrations.Migration):
dependencies = [
('coredata', '0025_update_choices'),
('ra', '0016_ | add_drafts'),
]
operations = [
migrations.AddField(
model_name='rarequest',
name='processor',
field=models.ForeignKey(default=None, editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='rarequest_processo | r', to='coredata.Person'),
),
]
|
jakobharlan/avango | avango-display/python/avango/display/setups/AutoStereoDisplay.py | Python | lgpl-3.0 | 2,055 | 0.000973 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################### | #######################
import avango.display
class AutoStereoDisplay(avango.display.Display):
def __init__(self, inspector | , options):
super(AutoStereoDisplay, self).__init__("AutoStereoDisplay", inspector)
window = self.make_window(0, 0, 1200, 1600, 0.33, 0.43, True)
window.Name.value = ""
self.add_window(window, avango.osg.make_trans_mat(0, 1.7, -0.7), 0)
user = avango.display.nodes.User()
user.Matrix.value = avango.osg.make_trans_mat(avango.osg.Vec3(0., 1.7, 0.))
self.add_user(user)
|
willingc/vms | vms/volunteer/views.py | Python | gpl-2.0 | 7,224 | 0.004291 | import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.servers.basehttp import FileWrapper
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic import ListView
from braces.views import LoginRequiredMixin, AnonymousRequiredMixin
from organization.services import *
from shift.services import *
from event.services import get_signed_up_events_for_volunteer
from job.services import get_signed_up_jobs_for_volunteer
from volunteer.forms import ReportForm, SearchVolunteerForm, VolunteerForm
from django.views.generic.edit import FormView, UpdateView
from volunteer | .models import Volunteer
from volunteer.services import *
from volunteer.validation import validate_file
from django.views.generic import View
from django.core.urlresolvers import reverse_lazy
@login_required
def download_resume(request, volunteer_id):
| user = request.user
if int(user.volunteer.id) == int(volunteer_id):
if request.method == 'POST':
basename = get_volunteer_resume_file_url(volunteer_id)
if basename:
filename = settings.MEDIA_ROOT + basename
wrapper = FileWrapper(file(filename))
response = HttpResponse(wrapper)
response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(filename)
response['Content-Length'] = os.path.getsize(filename)
return response
else:
raise Http404
else:
return HttpResponse(status=403)
@login_required
def delete_resume(request, volunteer_id):
user = request.user
if int(user.volunteer.id) == int(volunteer_id):
if request.method == 'POST':
try:
delete_volunteer_resume(volunteer_id)
return HttpResponseRedirect(reverse('volunteer:profile', args=(volunteer_id,)))
except:
raise Http404
else:
return HttpResponse(status=403)
'''
The View to edit Volunteer Profile
'''
class VolunteerUpdateView(LoginRequiredMixin, UpdateView, FormView):
form_class = VolunteerForm
template_name = 'volunteer/edit.html'
success_url = reverse_lazy('volunteer:profile')
def get_object(self, queryset=None):
volunteer_id = self.kwargs['volunteer_id']
obj = Volunteer.objects.get(pk=volunteer_id)
return obj
def form_valid(self, form):
volunteer_id = self.kwargs['volunteer_id']
volunteer = get_volunteer_by_id(volunteer_id)
organization_list = get_organizations_ordered_by_name()
if 'resume_file' in self.request.FILES:
my_file = form.cleaned_data['resume_file']
if validate_file(my_file):
# delete an old uploaded resume if it exists
has_file = has_resume_file(volunteer_id)
if has_file:
try:
delete_volunteer_resume(volunteer_id)
except:
raise Http404
else:
return render(self.request, 'volunteer/edit.html',
{'form': form, 'organization_list': organization_list, 'volunteer': volunteer,
'resume_invalid': True,})
volunteer_to_edit = form.save(commit=False)
organization_id = self.request.POST.get('organization_name')
organization = get_organization_by_id(organization_id)
if organization:
volunteer_to_edit.organization = organization
else:
volunteer_to_edit.organization = None
# update the volunteer
volunteer_to_edit.save()
return HttpResponseRedirect(reverse('volunteer:profile', args=(volunteer_id,)))
'''
The view to diaplay Volunteer profile.
It uses DetailView which is a generic class-based views are designed to display data.
'''
class ProfileView(LoginRequiredMixin, DetailView):
template_name = 'volunteer/profile.html'
def get_object(self, queryset=None):
volunteer_id = self.kwargs['volunteer_id']
obj = Volunteer.objects.get(id=self.kwargs['volunteer_id'])
if obj:
return obj
else:
return HttpResponse(status=403)
'''
The view generate Report.
GenerateReportView calls two other views(ShowFormView, ShowReportListView) within it.
'''
class GenerateReportView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
view = ShowFormView.as_view()
return view(request, *args,**kwargs)
def post(self, request, *args, **kwargs):
view = ShowReportListView.as_view()
return view(request, *args, **kwargs)
class ShowFormView(LoginRequiredMixin, FormView):
"""
Displays the form
"""
model = Volunteer
form_class = ReportForm
template_name = "volunteer/report.html"
class ShowReportListView(LoginRequiredMixin, ListView):
"""
Generate the report using ListView
"""
template_name = "volunteer/report.html"
def post(self, request, *args, **kwargs):
volunteer_id = self.kwargs['volunteer_id']
event_list = get_signed_up_events_for_volunteer(volunteer_id)
job_list = get_signed_up_jobs_for_volunteer(volunteer_id)
event_name = self.request.POST['event_name']
job_name = self.request.POST['job_name']
start_date = self.request.POST['start_date']
end_date = self.request.POST['end_date']
report_list = get_volunteer_report(volunteer_id, event_name, job_name, start_date, end_date)
total_hours = calculate_total_report_hours(report_list)
return render(request, 'volunteer/report.html',
{'report_list': report_list, 'total_hours': total_hours, 'notification': True,
'job_list': job_list, 'event_list': event_list, 'selected_event': event_name,
'selected_job': job_name})
@login_required
def search(request):
if request.method == 'POST':
form = SearchVolunteerForm(request.POST)
if form.is_valid():
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
city = form.cleaned_data['city']
state = form.cleaned_data['state']
country = form.cleaned_data['country']
organization = form.cleaned_data['organization']
search_result_list = search_volunteers(first_name, last_name, city, state, country, organization)
return render(request, 'volunteer/search.html', {'form' : form, 'has_searched' : True, 'search_result_list' : search_result_list})
else:
form = SearchVolunteerForm()
return render(request, 'volunteer/search.html', {'form' : form, 'has_searched' : False})
|
akvo/akvo-rsr | akvo/iati/exports/elements/current_situation.py | Python | agpl-3.0 | 877 | 0.003421 | # -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from lxml import etree
def current_situation(project):
"""
Generate the current situation element, a description el | ement with type "1" and akvo type "9" | .
:param project: Project object
:return: A list of Etree elements
"""
if project.current_status:
element = etree.Element("description")
element.attrib['type'] = '1'
element.attrib['{http://akvo.org/iati-activities}type'] = '9'
narrative_element = etree.SubElement(element, "narrative")
narrative_element.text = project.current_status
return [element]
return []
|
apyrgio/ganeti | test/py/cmdlib/backup_unittest.py | Python | bsd-2-clause | 7,522 | 0.003722 | #!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for LUBackup*"""
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import query
from testsupport import *
import testutils
class TestLUBackupPrepare(CmdlibTestCase):
@patchUtils("instance_utils")
def testPrepareLocalExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_LOCAL)
self.ExecOpCode(op)
@patchUtils("instance_utils")
def testPrepareRemoteExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
self.rpc.call_x509_cert_create.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(inst.primary_node,
("key_name",
testutils.ReadTestData("cert1.pem")))
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_REMOTE)
self.ExecOpCode(op)
class TestLUBackupExportBase(CmdlibTestCase):
def setUp(self):
super(TestLUBackupExportBase, self).setUp()
self.rpc.call_instance_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, True)
self.rpc.call_blockdev_assemble.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("/dev/mock_path",
"/dev/mock_link_name",
None))
self.rpc.call_blockdev_shutdown.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_blockdev_snapshot.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("mock_vg", "mock_id"))
self.rpc.call_blockdev_remove.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_export_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, "export_daemon")
def ImpExpStatus(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
| [objects.ImportExportStatus(
| exit_status=0
)])
self.rpc.call_impexp_status.side_effect = ImpExpStatus
def ImpExpCleanup(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid)
self.rpc.call_impexp_cleanup.side_effect = ImpExpCleanup
self.rpc.call_finalize_export.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
def testRemoveRunningInstanceWithoutShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name,
shutdown=False,
remove_instance=True)
self.ExecOpCodeExpectOpPrereqError(
op, "Can not remove instance without shutting it down before")
def testUnsupportedDiskTemplate(self):
inst = self.cfg.AddNewInstance(disk_template=constants.DT_FILE)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name)
self.ExecOpCodeExpectOpPrereqError(
op, "Export not supported for instances with file-based disks")
class TestLUBackupExportLocalExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportLocalExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.target_node = self.cfg.AddNewNode()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_LOCAL,
instance_name=self.inst.name,
target_node=self.target_node.name)
self.rpc.call_import_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.target_node, "import_daemon")
def testExportWithShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = self.CopyOpCode(self.op, instance_name=inst.name, shutdown=True)
self.ExecOpCode(op)
def testExportDeactivatedDisks(self):
self.ExecOpCode(self.op)
def testExportRemoveInstance(self):
op = self.CopyOpCode(self.op, remove_instance=True)
self.ExecOpCode(op)
def testValidCompressionTool(self):
op = self.CopyOpCode(self.op, compress="lzop")
self.cfg.SetCompressionTools(["gzip", "lzop"])
self.ExecOpCode(op)
def testInvalidCompressionTool(self):
op = self.CopyOpCode(self.op, compress="invalid")
self.cfg.SetCompressionTools(["gzip", "lzop"])
self.ExecOpCodeExpectOpPrereqError(op, "Compression tool not allowed")
class TestLUBackupExportRemoteExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportRemoteExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_REMOTE,
instance_name=self.inst.name,
target_node=[],
x509_key_name=["mock_key_name"],
destination_x509_ca="mock_dest_ca")
def testRemoteExportWithoutX509KeyName(self):
op = self.CopyOpCode(self.op, x509_key_name=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing X509 key name for encryption")
def testRemoteExportWithoutX509DestCa(self):
op = self.CopyOpCode(self.op, destination_x509_ca=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing destination X509 CA")
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
nlamirault/python-freeboxclient | freeboxclient/simple.py | Python | apache-2.0 | 1,473 | 0 | #
# Copyright 2013 Nicolas Lamirault <nicolas.lamirault@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from cliff import command
import freeboxclient
from freeboxclient import config
class About(command.Command):
"""A command that prints information about this tool."""
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
self.app.stdout.write('This is a client for the Freebox OS.\n')
self.app.stdout.write('Copyright (c) %s\n' % freeboxclient.__author__)
class Configuration(command.Command):
"""A command that prints available configuration."""
log = logging.getLogger(__name__)
def take_ | action(self, parsed_args):
conf = config.load_configuration()
if conf:
self.app.stdout.write("Configuration | :\n%s" % conf)
else:
self.app.stdout.write("Configuration file %s doesn't exists.\n" %
config)
|
naturalness/unnaturalcode | unnaturalcode/mutators.py | Python | agpl-3.0 | 7,294 | 0.010008 | from copy import copy
from random import randint
class Mutators(object):
def deleteRandom(self, vFile):
"""Delete a random token from a file."""
ls = copy(vFile.scrubbed)
idx = randint(1, len(ls)-2)
after = ls[idx+1]
token = ls.pop(idx)
if token.type == 'ENDMARKER':
return self.deleteRandom(vFile)
vFile.mutate(ls, ls[idx-1], token, after)
return None
def insertRandom(self, vFile):
ls = copy(vFile.scrubbed)
token = ls[randint(0, len(ls)-1)]
pos = randint(1, len(ls)-2)
inserted = ls.insert(pos, token)
if inserted[0].type == 'ENDMARKER':
return self.insertRandom(vFile)
vFile.mutate(ls, ls[pos-1], inserted[0], ls[pos+1])
return None
def replaceRandom(self, vFile):
ls = copy(vFile.scrubbed)
token = ls[randint(0, len(ls)-1)]
pos = randint(1, len(ls)-2)
oldToken = ls.pop(pos)
if oldToken.type == 'ENDMARKER':
return self.replaceRandom(vFile)
inserted = ls.insert(pos, token)
if inserted[0].type == 'ENDMARKER':
return self.replaceRandom(vFile)
vFile.mutate(ls, ls[pos-1], inserted[0], ls[pos+1])
return None
def dedentRandom(self, vFile):
s = copy(vFile.original)
lines = s.splitlines(True);
while True:
line = randint(0, len(lines)-1)
if beginsWithWhitespace.match(lines[line]):
lines[line][0] = ''
break
vFile.mutatedLexemes = vFile.lm("".join(lines))
vFile.mutatedLocation = pythonLexeme.fromTuple((token.INDENT, ' ', (line+1, 0), (line+1, 0)))
return None
def indentRandom(self, vFile):
s = copy(vFile.original)
lines = s.splitlines(True);
line = randint(0, len(lines)-1)
if beginsWithWhitespace.match(lines[line]):
lines[line] = lines[line][0] + lines[line]
else:
lines[line] = " " + lines[line]
vFile.mutatedLexemes = vFile.lm("".join(lines))
vFile.mutatedLocation = pythonLexeme.fromTuple((token.INDENT, ' ', (line+1, 0), (line+1, 0)))
return None
def punctRandom(self, vFile):
s = copy(vFile.original)
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (funny.match(c)):
new = s[:charPos] + s[charPos+1:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
else:
return self.punctRandom(vFile)
#def keyRandom(self, vFile):
#s = copy(vFile.original)
def nameRandom(self, vFile):
return self.deleteWordRandom(vFile)
def insertWordRandom(self, vFile):
s = copy(vFile.original)
while True:
char = s[randint(1, len(s)-1)]
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (name.match(char)):
break
new = s[:charPos] + char + s[charPos:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
def deleteWordRandom(self, vFile):
s = copy(vFile.original)
while True:
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (name.match(c)):
break
new = s[:charPos] + s[charPos+1:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
def insertPunctRandom(self, vFile):
s = copy(vFile.original)
if not punct.search(s):
return "No punctuation"
while (True):
char = s[randint(1, len(s)-1)]
if (punct.match(char)):
break
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
new = s[:charPos] + char + s[charPos:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
def deleteNumRandom(self, vFile):
s = copy(vFile.original)
if not numeric.search(s):
return "No numbers"
positions = [x.start() for x in numeric.finditer(s)]
while True:
if (len(positions) == 1):
charPos = positions[0]
else:
charPos = positions[randint(1, len(positions)-1)]
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (numeric.match(c)):
break
new = s[:charPos] + s[charPos+1:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
def insertNumRandom(self, vFile):
s = copy(vFile.original)
char = str(randint(0, 9))
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
new = s[:charPos] + char + s[charPos:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
def deletePunctRandom(self, vFile):
s = copy(vFile.original)
if not punct.search(s):
return "No punctuation"
while True:
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True | )
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (punct.match(c)):
break
new = s[:charPos] + s[charPos+1:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, l | ineChar), (line, lineChar)))
return None
def colonRandom(self, vFile):
s = copy(vFile.original)
while True:
charPos = randint(1, len(s)-1)
linesbefore = s[:charPos].splitlines(True)
line = len(linesbefore)
lineChar = len(linesbefore[-1])
c = s[charPos:charPos+1]
if (c == ':'):
break
new = s[:charPos] + s[charPos+1:]
vFile.mutatedLexemes = vFile.lm(new)
vFile.mutatedLocation = pythonLexeme.fromTuple((token.OP, c, (line, lineChar), (line, lineChar)))
return None
|
googlefonts/diffbrowsers | bin/test_gf_autohint.py | Python | apache-2.0 | 2,225 | 0.004494 | """
If a family has been hinted with ttfautohint. The x-height must remain
the same as before. Users do notice changes:
https://github.com/google/fonts/issues/644
https://github.com/google/fonts/issues/528
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import argparse
import os
import time
import logging
from diffbrowsers.diffbrowsers import DiffBrowsers
from diffbrowsers.utils import load_browserstack_credentials, cli_reporter
from diffbrowsers.browsers import test_browsers
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('fonts_after', nargs="+", help="Fonts after paths")
before_group = parser.add_argument_group(title="Fonts before input")
before_input_group = before_group.add_mutually_exclusive_group(required=True)
before_input_group.add_argument('-fb', '--fonts-before', nargs="+",
help="Fonts before paths")
before_input_group.add_argument('-gf', '--from-googlefonts', action='store_true',
help="Diff against GoogleFonts instead of fonts_before")
parser.add_argument('-o', '--output-dir', help="Directory for output images",
required=True | )
args = parser.parse_args()
auth = load_browserstack_credentials()
browsers_to_test = test_browsers['all_browsers']
fonts_before = 'from-googlefonts' if args.from_googlefonts \
else args.fonts_before
diffbrowsers = DiffBrowsers(dst_dir=args.output_dir, browsers=browsers_to_test)
diffbrowsers.new_session(fonts_before, args.fonts_after)
diffbrowsers.diff_view('waterfall', gen_gifs=True)
logger.info("Sleeping for 1 | 0 secs. Giving Browserstack api a rest")
time.sleep(10)
diffbrowsers.update_browsers(test_browsers['osx_browser'])
diffbrowsers.diff_view('glyphs-modified', gen_gifs=True)
report = cli_reporter(diffbrowsers.stats)
report_path = os.path.join(args.output_dir, 'report.txt')
with open(report_path, 'w') as doc:
doc.write(report)
print(report)
if __name__ == '__main__':
main()
|
checkr/fdep | tests/fixtures/serve/app.py | Python | mit | 36 | 0 | def | classify(text):
return T | rue
|
chopmann/warehouse | tasks/__init__.py | Python | apache-2.0 | 603 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License | .
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import invoke |
from . import pip
ns = invoke.Collection(pip)
|
KennethPierce/pylearnk | pylearn2/space/__init__.py | Python | bsd-3-clause | 85,589 | 0.000093 | """
Classes that define how vector spaces are formatted
Most of our models can be viewed as linearly transforming
one vector space to another. These classes define how the
vector spaces should be represented as theano/numpy
variables.
For example, the VectorSpace class just represents a
vector space with a vector, and the model can transform
between spaces with a matrix multiply. The Conv2DSpace
represents a vector space as an image, and the model
can transform between spaces with a 2D convolution.
To make models as general as possible, models should be
written in terms of Spaces, rather than in terms of
numbers of hidden units, etc. The model should also be
written to transform between spaces using a generic
linear transformer from the pylearn2.linear module.
The Space class is needed so that the model can specify
what kinds of inputs it needs and what kinds of outputs
it will produce when communicating with other parts of
the library. The model also uses Space objects internally
to allocate parameters like hidden unit bias terms in
the right space.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import functools, warnings
import numpy as np
import theano
import theano.sparse
from theano import tensor
from theano.tensor import TensorType
from theano.gof.op import get_debug_values
from theano.sandbox.cuda.type import CudaNdarrayType
from pylearn2.utils import py_integer_types, safe_zip, sharedX, wraps
from pylearn2.format.target_format import OneHotFormatter
if theano.sparse.enable_sparse:
# We know scipy.sparse is available
import scipy.sparse
def _is_batch_all(batch, predicate):
"""
Implementation of is_symbolic_batch() and is_numeric_batch().
Returns True iff predicate() returns True for all components of
(possibly composite) batch.
Parameters
----------
batch : any numeric or symbolic batch.
This includes numpy.ndarray, theano.gof.Variable, None, or a (nested)
tuple thereof.
predicate : function.
A unary function of any non-composite batch that returns True or False.
"""
# Catches any CompositeSpace batches that were mistakenly hand-constructed
# using nested lists rather than nested tuples.
assert not isinstance(batch, list)
# Data-less batches such as None or () are valid numeric and symbolic
# batches.
#
# Justification: we'd like
# is_symbolic_batch(space.make_theano_batch()) to always be True, even if
# space is an empty CompositeSpace.
if batch is None or (isinstance(batch, tuple) and len(batch) == 0):
return True
if isinstance(batch, tuple):
subbatch_results = tuple(_is_batch_all(b, predicate)
for b in batch)
result = all(subbatch_results)
# The subbatch_results must be all true, or all false, not a mix.
assert result == any(subbatch_results), ("composite batch had a "
"mixture of numeric and "
"symbolic subbatches. This "
"should never happen.")
return result
else:
return predicate(batch)
def is_symbolic_batch(batch):
"""
Returns True if batch is a symbolic variable.
Note that a batch may be both a symbolic and numeric variable
(e.g. () for empty CompositeSpaces, None for NullSpaces).
"""
return _is_batch_all(batch, lambda x: isinstance(x, theano.gof.Variable))
def is_numeric_batch(batch):
"""
Returns True if batch is a numeric variable.
Note that a batch may be both a symbolic and numeric variable
(e.g. () for empty CompositeSpaces, None for NullSpaces).
"""
def is_numeric(batch):
return isinstance(batch, np.ndarray) or scipy.sparse.issparse(batch)
return _is_batch_all(batch, is_num | eric)
def _dense_to_sparse(batch):
"""
Casts dense batches to sparse batches (non-composite).
Supports both symbolic and numeric variables.
"""
if isinstance(batch, tuple):
raise TypeError("Composite batches not supported.")
assert not isinstance(batch, list)
if is_symbolic_batch(batch):
| assert isinstance(batch, theano.tensor.TensorVariable)
return theano.sparse.csr_from_dense(batch)
else:
assert isinstance(batch, np.ndarray), "type of batch: %s" % type(batch)
return scipy.sparse.csr_matrix(batch)
def _reshape(arg, shape):
"""
Reshapes a tensor. Supports both symbolic and numeric variables.
This is a hack that first converts from sparse to dense, reshapes
the dense tensor, then re-converts from dense to sparse. It is
therefore memory-inefficient and unsuitable for large tensors. It
will be replaced by a proper sparse reshaping Op once Theano
implements that.
"""
if isinstance(arg, tuple):
raise TypeError("Composite batches not supported.")
assert not isinstance(arg, list)
if isinstance(arg, (np.ndarray, theano.tensor.TensorVariable)):
return arg.reshape(shape)
elif isinstance(arg, theano.sparse.SparseVariable):
warnings.warn("Using pylearn2.space._reshape(), which is a "
"memory-inefficient hack for reshaping sparse tensors. "
"Do not use this on large tensors. This will eventually "
"be replaced by a proper Theano Op for sparse "
"reshaping, once that is written.")
dense = theano.sparse.dense_from_sparse(arg)
dense = dense.reshape(shape)
if arg.format == 'csr':
return theano.sparse.csr_from_dense(dense)
elif arg.format == 'csc':
return theano.sparse.csc_from_dense(dense)
else:
raise ValueError('Unexpected sparse format "%s".' % arg.format)
else:
raise TypeError('Unexpected batch type "%s"' % str(type(arg)))
def _cast(arg, dtype):
"""
Does element-wise casting to dtype.
Supports symbolic, numeric, simple, and composite batches.
Returns <arg> untouched if <dtype> is None, or dtype is unchanged
(i.e. casting a float32 batch to float32).
(One exception: composite batches are never returned as-is.
A new tuple will always be returned. However, any components
with unchanged dtypes will be returned untouched.)
"""
if dtype is None:
return arg
assert dtype in tuple(t.dtype for t in theano.scalar.all_types)
if isinstance(arg, tuple):
return tuple(_cast(a, dtype) for a in arg)
elif isinstance(arg, np.ndarray):
# theano._asarray is a safer drop-in replacement to numpy.asarray.
return theano._asarray(arg, dtype=dtype)
elif str(type(arg)) == "<type 'CudaNdarray'>": # numeric CUDA array
if str(dtype) != 'float32':
raise TypeError("Can only cast a numeric CudaNdarray to "
"float32, not %s" % dtype)
else:
return arg
elif (isinstance(arg, theano.gof.Variable) and
isinstance(arg.type, CudaNdarrayType)): # symbolic CUDA array
if str(dtype) != 'float32':
raise TypeError("Can only cast a theano CudaNdArrayType to "
"float32, not %s" % dtype)
else:
return arg
elif scipy.sparse.issparse(arg):
return arg.astype(dtype)
elif isinstance(arg, theano.tensor.TensorVariable):
return theano.tensor.cast(arg, dtype)
elif isinstance(arg, theano.sparse.SparseVariable):
return theano.sparse.cast(arg, dtype)
elif isinstance(arg, theano.sandbox.cuda.var.CudaNdarrayVariable):
return arg
else:
raise TypeError("Unsupported arg type '%s'" % str(type(arg)))
class Space(object):
"""
A vector space that can be transformed by a linear operator.
Space and its subclasses are used to transform a data batch's geometry
(e.g. vector |
vdmtools/vdmtools | test/powertest/tcrun.py | Python | gpl-3.0 | 14,259 | 0.019216 | import gentestcases, cmdline, util, setup, report, convert, resfile
import os, re, locale
true, false = 1,0
#--------------------------------------------------------------------------------
# Execute type checker test environment.
# lang - language to use (SL, PP, RT)
# type - type of test (either spec or impl)
# return - Nothing.
#--------------------------------------------------------------------------------
def execute(lang, type):
# if lang == 'rt':
# report.Progress(3,"Skipping rt for type checker - not applicable")
# return
for posdef in ['pos','def']:
if cmdline.LookUp('TC-Type')[posdef]:
report.setTypeTestType(posdef)
report.Progress (1, "Running " + posdef + " test cases")
util.SetProfileBaseName("gmon-tc-"+lang+"-"+type+"-"+posdef+"-"+cmdline.StartDate())
RegisterExpansionSet(lang, type, posdef)
if (type == 'spec'):
executeSpec(lang,posdef)
else:
executeImpl(lang,posdef)
#--------------------------------------------------------------------------------
# Execute type checker test environment for specifications
# lang - language to use (SL, PP)
# posdef - variant to run (pos, def)
# return - Nothing
#--------------------------------------------------------------------------------
def executeSpec(lang,posdef):
ok = convert.SetupSpecification(lang, 'tc')
if not ok:
report.Error("ABORTING specification test for '" + langi + "'")
return
# counter to indicate progress
total = 1
# number of spec. files to run in one vdmde execution
jobSize = cmdline.LookUp('spec-job-size')
# Initialize the extraction of test cases.
gentestcases.StartSearch('tc', lang, 'spec')
# indicates if any spec test case produced an error.
anyError = false
# Initialize coverage file
coverageFile = "coverage-tc-"+lang+"-"+posdef+"-"+cmdline.StartDate()
while ( true ):
testCases = gentestcases.NextTestCases(jobSize)
if testCases == []:
break
startIndex = total
endIndex = total+len(testCases) -1
report.Progress(2, "Handling test cases " + str(startIndex) + "..." + str(endIndex))
# Prepare the next test run - the parameter 'spec-job-size' tells how
# many testcases should be executed in each run.
names = []
util.DeleteFiles([".vdmtest"])
for name in testCases:
# prepare test cases.
ok = PrepareSpecCase(name, lang, posdef)
anyError = not ok or anyError
if ok:
names.append(name)
# Get ready for next iteration.
total = total +1
# Run the test cases
if names != []:
report.Progress(3, "Running test cases " + str(startIndex) + "..." + str(endIndex))
report.setTestCaseName("testcase " + str(startIndex) + "..." + str(endIndex))
okNames = RunSpecTestCases(names, lang, posdef, coverageFile)
util.MoveProfile()
# Clean Up.
for testName in names:
ok = (okNames.count(testName) > 0)
if util.CleanFile(ok):
baseName = util.ExtractName(testName)
util.DeleteFiles([baseName+".vdm", baseName+".ast", baseName+".arg", baseName+".arg.res", baseName+".debug", baseName+".arg.pt","debug.arg"])
anyError = anyError and ok
# Clean up .vdmde and .vdmtest
if util.CleanFile(not anyError):
util.DeleteFiles([".vdmde",".vdmtest"])
#--------------------------------------------------------------------------------
# Execute type checker test environment for specifications
# lang - language to use (SL, PP)
# posdef - variant to run (pos, def)
# return - Nothing
#--------------------------------------------------------------------------------
def executeImpl(lang,posdef):
# counter to indicate progress
total = 1
# jobSize is used to give a low level of outputting
jobSize = cmdline.LookUp('spec-job-size')
# Initialize the extraction of test cases.
gentestcases.StartSearch('tc', lang, 'impl')
name = gentestcases.NextTestCase()
while (name != None):
report.setTestCaseName(name)
if (total % jobSize) == 1:
report.Progress(2, "Handling test cases " + str(total) + "..." + str(total + jobSize-1))
r | eport.Progress(3, "Running " + name)
ok = RunImplTestCase(name, lang, posdef)
if util.CleanFile(ok):
bn = util.ExtractName(name)
util.DeleteFiles([bn+".vdm"])
name = gentestcases.NextTestCase()
total = total +1
util.MoveProfile()
#-------------------------------------------------------------- | ------------------
# Prepare a single test case for specification test run.
# name - the full name of the .vdm file to test
# lang - the language to use (SL, PP)
# posdef - variant to run (pos,def)
# return - a boolean which indicates whether the preparation went ok.
#--------------------------------------------------------------------------------
def PrepareSpecCase(name, lang, posdef):
report.Progress(3, "preparing " + name)
report.setTestCaseName(name)
ok = convert.ConvertLanguage(lang, name)
if ok:
parser = cmdline.LookUpWildCard('tc', lang, 'spec', 'parser', posdef)
ok = convert.VDM2AST(name, parser,false)
if ok:
ok = convert.CreateOptionsFile(name)
if posdef == 'pos':
posdefStr = '<POS>'
else:
posdefStr = '<DEF>'
if ok:
convert.CreateDebugFile(name, "debug-file-tc", {'<<POSDEF>>' : posdefStr} )
if ok:
ok = convert.CreateArgFile(name, "arg-file-tc", {'<<POSDEF>>' : posdefStr} )
if ok:
ok = convert.AppendToDotVdmtest(name)
# Clean up if test case failed
if not ok and util.CleanFile(ok):
baseName = util.ExtractName(name)
util.DeleteFiles([baseName+".vdm", baseName+".ast", baseName+".arg", baseName+".debug"])
return ok
#--------------------------------------------------------------------------------
# Execute the vdmde binary with a number of test cases, and verify that they
# produces an expected result.
# names - A sequence of full names of the input vdm file.
# lang - Language to use (SL or PP)
# posdef - Variant to run.
# return - A sequence of full names for all the test cases which failed.
#--------------------------------------------------------------------------------
def RunSpecTestCases(names, lang, posdef, coverageFile):
# remove files we expect output in to.
for fullName in names:
util.RemoveTestFiles(fullName, [".arg.pt", ".arg.res"])
# run the test cases
interpreter = cmdline.LookUpWildCard('tc', lang, 'spec', 'interpreter', posdef)
cmd = interpreter + " -a -b -I -D -P -R " + coverageFile
exitCode = util.RunCommand(cmd, 0, "Possible core dump while interpreting specification.", false, true)
okNames = []
# Now validate the results
for fullName in names:
bn = util.ExtractName(fullName)
resName = bn + ".arg.res"
report.setTestCaseName(fullName)
# See if a result file was created
if not os.path.exists(resName):
report.Error("No result generated for test case " + fullName,
"Maybe the interpreting toolbox failed for one of the previous test cases")
continue
# read the result from the result file, and translate it to a list of numbers
result = TranslateResultSpec(fullName)
if result == None:
continue
# Find the expected result file
expResName = resfile.FindResFile(fullName)
if expResName == None :
if util.KeepFile(false):
WriteResult(fullName, result)
continue
# Validate the result.
report.Progress(4,"Validating result with result file: '" + expResName + "'")
ok = ValidateResult(fullName, expResName, result, None, None)
if ok:
okNames.append(fullName)
if util.KeepFile(ok):
WriteResult(fullName, result)
return okNames
#--------------------------------------------------------------------------------
# Run test case for implementation.
# fullName - Name of the original vdm file
# lang - The language to run
# posdef - either pos or def.
# returns true if no errors occur.
#--------------------------------------------------------------------------------
def RunImplTestCase(fullName, lang, posdef):
ok = convert.ConvertLanguage(lang, fullName)
# Remove the files we expect output in to |
Triv90/SwiftUml | swift/proxy/controllers/obj.py | Python | apache-2.0 | 50,946 | 0.000059 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import itertools
import mimetypes
import re
import time
from datetime import datetime
from urllib import unquote, quote
from hashlib import md5
from eventlet import sleep, GreenPile
from eventlet.queue import Queue
from eventlet.timeout import Timeout
from swift.common.utils import ContextPool, normalize_timestamp, \
config_true_value, public, json, csv_append
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
CONTAINER_LISTING_LIMIT, MAX_FILE_SIZE
from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ListingIterNotFound, \
ListingIterNotAuthorized, ListingIterError, SloSegmentError
from swift.common.http import is_success, is_client_error, HTTP_CONTINUE, \
HTTP_CREATED, HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_OK
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, Response, \
HTTPClientDisconnect
def segment_listing_iter(listing):
listing = iter(listing)
while True:
seg_dict = listing.next()
if isinstance(seg_dict['name'], unicode):
seg_dict['name'] = seg_dict['name'].encode('utf-8')
yield seg_dict
def copy_headers_into(from_r, to_r):
"""
Will copy desired headers from from_r to to_r
:params from_r: a swob Request or Response
:params to_r: a swob Request or Response
"""
for k, v in from_r.headers.items():
if k.lower().startswith('x-object-meta-'):
to_r.headers[k] = v
def check_content_type(req):
if not req.environ.get('swift.content_type_overriden') and \
';' in req.headers.get('content-type', ''):
for param in req.headers['content-type'].split(';')[1:]:
if param.lstrip().startswith('swift_'):
return HTTPBadRequest("Invalid Content-Type, "
"swift_* is not a valid parameter name.")
return None
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a segmented object in Swift.
If there's a failure that cuts the transfer short, the response's
`status_int` will be updated (again, just for logging since the original
status would have already been sent to the client).
:param controller: The ObjectController instance to work with.
:param container: The container the object segments are within. If
container is None will derive container from elements
in listing using split('/', 1).
:param listing: The listing of object segments to iterate over; this may
be an iterator or list that returns dicts with 'name' and
'bytes' keys.
:param response: The swob.Response this iterable is associated with, if
any (default: None)
"""
def __init__(self, controller, container, listing, response=None,
is_slo=False):
self.controller = controller
self.container = container
self.listing = segment_listing_iter(listing)
self.is_slo = is_slo
self.segment = 0
self.segment_dict = None
self.segment_peek = None
self.seek = 0
self.segment_iter = None
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = None
self.position = 0
self.response = response
if not self.response:
self.response = Response()
self.next_get_time = 0
def _load_next_segment(self):
"""
Loads the self.segment_iter with the next object segment's contents.
:raises: StopIteration when there are no more object segments or
segment no longer matches SLO manifest specifications.
"""
try:
self.segment += 1
self.segment_dict = self.segment_peek or self.listing.next()
self.segment_peek = None
if self.container is None:
container, obj = \
self.segment_dict['name'].lstrip('/').split('/', 1)
else:
container, obj = self.container, self.segment_dict['name']
partition, nodes = self.controller.app.object_ring.get_nodes(
self.controller.account_name, container, obj)
path = '/%s/%s/%s' % (self.controller.account_name, container, obj)
req = Request.blank(path)
if self.seek:
req.range = 'bytes=%s-' % self.seek
self.seek = 0
if not self.is_slo and self.segment > \
self.controller.app.rate_limit_after_segment:
sleep(max(self.next_get_time - time.time(), 0))
self.next_get_time = time.time() + \
1.0 / self.controller.app.rate_limit_segments_per_sec
nodes = self.controller.app.sort_nodes(nodes)
resp = self.controller.GETorHEAD_base(
req, _('Object'), partition,
self.controller.iter_nodes(partition, nodes,
self.controller.app.object_ring),
path, len(nodes))
if self.is_slo and resp.status_int == HTTP_NOT_FOUND:
raise SloSegmentError(_(
'Could not load object segment %(path)s:'
' %(status)s') % {'path': path, 'status': resp.status_int})
if not is_success(resp.status_int):
raise Exception(_(
'Could not load object segment %(path)s:'
' %(status)s') % {'path': path, 'status': resp.status_int})
if self.is_slo:
if (resp.content_length != self.segment_dict['bytes'] or
resp.etag != self.segment_dict['hash']):
raise SloSegmentError(_(
'Object segment no longer valid: '
| '%(path)s etag: %(r_etag)s != %(s_etag)s or '
'size: %(r_size)s != %(s_size)s') %
{'path': path, 'r_e | tag': resp.etag,
's_etag': self.segment_dict['hash'],
'r_size': resp.content_length,
's_size': self.segment_dict['bytes']})
self.segment_iter = resp.app_iter
# See NOTE: swift_conn at top of file about this.
self.segment_iter_swift_conn = getattr(resp, 'swift_conn', None)
except StopIteration:
raise
except SloSegmentError, err:
if not getat |
artwr/airflow | airflow/hooks/base_hook.py | Python | apache-2.0 | 3,203 | 0 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import random
from airflow.models.connection import Connection
from airflow.exceptions import AirflowException
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(LoggingMixin):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
@provide_session
def _get_connections_from_db(cls, conn_id, session=None):
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id):
con | n = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
log = LoggingMixin(). | log
log.info("Using connection to: %s", conn.debug_info())
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
|
TNG/svnfiltereddump | functional_tests/command_line_feature_tests.py | Python | gpl-3.0 | 13,483 | 0.013053 |
import unittest
from subprocess import call
from functional_test_environment import TestEnvironment
class ComandLineFeatureTests(unittest.TestCase):
def setUp(self):
self.env = TestEnvironment()
def tearDown(self):
self.env.destroy()
def check_log_of_file_in_rev(self, name, rev, exected_log):
( parsed_log, error ) = | self.env.get_log_of_file_in_rev(name, rev)
if error is None:
self.assertEquals(parsed_log, exected_log)
else:
self.fail(
"Failed to get log of file %s in revision %d with error:\n%s"
% ( nam | e, rev, error )
)
def test_basic_call(self):
env = self.env
# Revision 1
env.mkdir('a')
env.add_file('a/bla', 'xxx')
env.propset('a/bla', 'some_prop', 'prop_value')
env.mkdir('b')
env.add_file('b/blub', 'yyy')
env.commit('c1')
# Revision 2
env.change_file('a/bla', 'zzz')
env.commit('c2')
env.filter_repo( [ 'a' ] )
self.assertTrue(env.is_existing_in_rev('a', 1), 'Dir a was copied in rev 1')
self.assertFalse(env.is_existing_in_rev('b', 1), 'Dir b was not copied in rev 1')
self.assertEquals(env.get_file_content_in_rev('a/bla', 1), 'xxx', 'File a/bla correct in rev 1')
self.assertEquals(env.get_file_content_in_rev('a/bla', 2), 'zzz', 'File a/bla correct in rev 2')
self.assertEquals(env.get_property_in_rev('a/bla', 2, 'some_prop'), 'prop_value', 'File a/bla property correct in rev 2')
self.check_log_of_file_in_rev('a/bla', 2, [ [ 2, 'c2' ], [ 1, 'c1' ] ])
def test_quiet(self):
env = self.env
# Revision 1
env.mkdir('a')
env.add_file('a/bla', 'xxx')
env.propset('a/bla', 'some_prop', 'prop_value')
env.mkdir('b')
env.add_file('b/blub', 'yyy')
env.commit('c1')
# Revision 2
env.change_file('a/bla', 'zzz')
env.commit('c2')
env.filter_repo( [ '-q', 'a' ] )
self.assertTrue(env.is_existing_in_rev('a', 1), 'Dir a was copied in rev 1')
self.assertFalse(env.is_existing_in_rev('b', 1), 'Dir b was not copied in rev 1')
self.assertEquals(env.get_file_content_in_rev('a/bla', 1), 'xxx', 'File a/bla correct in rev 1')
self.assertEquals(env.get_file_content_in_rev('a/bla', 2), 'zzz', 'File a/bla correct in rev 2')
self.assertEquals(env.get_property_in_rev('a/bla', 2, 'some_prop'), 'prop_value', 'File a/bla property correct in rev 2')
self.check_log_of_file_in_rev('a/bla', 2, [ [ 2, 'c2' ], [ 1, 'c1' ] ])
def test_include(self):
env = self.env
env.mkdir('x/a')
env.mkdir('x/b')
env.add_file('x/a/bla', 'xxx')
env.add_file('x/b/blub', 'yyy')
env.add_file('x/foo', 'zzz')
env.add_file('x/bar', 'ZZZ')
env.commit('c1')
env.filter_repo( [ 'x/a', 'x/foo' ] )
self.assertTrue(env.is_existing_in_rev('x/a/bla', 1), 'File bla was copied')
self.assertTrue(env.is_existing_in_rev('x/foo', 1), 'File foo was copied')
self.assertFalse(env.is_existing_in_rev('x/b/blub', 1), 'File blub was NOT copied')
self.assertFalse(env.is_existing_in_rev('x/bar', 1), 'File bar was NOT copied')
def test_include_with_no_mkdirs(self):
env = self.env
env.mkdir('x/a')
env.mkdir('x/b')
env.add_file('x/a/bla', 'xxx')
env.add_file('x/b/blub', 'yyy')
env.add_file('x/foo', 'zzz')
env.add_file('x/bar', 'ZZZ')
env.commit('c1')
env.mkdir_target('x') # Rev 1 in target
env.filter_repo( [ '--no-extra-mkdirs', 'x/a', 'x/foo' ] )
self.assertTrue(env.is_existing_in_rev('x/a/bla', 2), 'File bla was copied')
self.assertTrue(env.is_existing_in_rev('x/foo', 2), 'File foo was copied')
self.assertFalse(env.is_existing_in_rev('x/b/blub', 2), 'File blub was NOT copied')
self.assertFalse(env.is_existing_in_rev('x/bar', 2), 'File bar was NOT copied')
def test_include_file(self):
env = self.env
env.mkdir('x/a')
env.mkdir('x/b')
env.add_file('x/a/bla', 'xxx')
env.add_file('x/b/blub', 'yyy')
env.add_file('x/foo', 'zzz')
env.add_file('x/bar', 'ZZZ')
env.commit('c1')
include_file = env.create_tmp_file("x/a\n" + "x/foo\n")
env.filter_repo( [ '--include-file', include_file ] )
self.assertTrue(env.is_existing_in_rev('x/a/bla', 1), 'File bla was copied')
self.assertTrue(env.is_existing_in_rev('x/foo', 1), 'File foo was copied')
self.assertFalse(env.is_existing_in_rev('x/b/blub', 1), 'File blub was NOT copied')
self.assertFalse(env.is_existing_in_rev('x/bar', 1), 'File bar was NOT copied')
def test_exclude(self):
env = self.env
env.mkdir('x/a')
env.mkdir('x/b')
env.add_file('x/a/bla', 'xxx')
env.add_file('x/b/blub', 'yyy')
env.add_file('x/foo', 'zzz')
env.add_file('x/bar', 'ZZZ')
env.commit('c1')
env.filter_repo( [ 'x', '--exclude', 'x/b', '--exclude', 'x/bar' ] )
self.assertTrue(env.is_existing_in_rev('x/a/bla', 1), 'File bla was copied')
self.assertTrue(env.is_existing_in_rev('x/foo', 1), 'File foo was copied')
self.assertFalse(env.is_existing_in_rev('x/b/blub', 1), 'File blub was NOT copied')
self.assertFalse(env.is_existing_in_rev('x/bar', 1), 'File bar was NOT copied')
def test_exclude_file(self):
env = self.env
env.mkdir('x/a')
env.mkdir('x/b')
env.add_file('x/a/bla', 'xxx')
env.add_file('x/b/blub', 'yyy')
env.add_file('x/foo', 'zzz')
env.add_file('x/bar', 'ZZZ')
env.commit('c1')
exclude_file = env.create_tmp_file("x/b\n" + "x/bar\n")
env.filter_repo( [ 'x', '--exclude-file', exclude_file ] )
self.assertTrue(env.is_existing_in_rev('x/a/bla', 1), 'File bla was copied')
self.assertTrue(env.is_existing_in_rev('x/foo', 1), 'File foo was copied')
self.assertFalse(env.is_existing_in_rev('x/b/blub', 1), 'File blub was NOT copied')
self.assertFalse(env.is_existing_in_rev('x/bar', 1), 'File bar was NOT copied')
def test_dont_drop_empty_revs(self):
env = self.env
# Revision 1
env.mkdir('a')
env.add_file('a/bla', 'xxx')
env.commit('c1')
# Revision 2
env.mkdir('b')
env.commit('c2')
# Revision 3
env.change_file('a/bla', 'yyy')
env.commit('c3')
env.filter_repo( [ 'a', '--keep-empty-revs' ] )
self.check_log_of_file_in_rev('a/bla', 3, [ [ 3, 'c3' ], [ 1, 'c1' ] ])
self.assertEquals(env.get_log_of_revision(2), 'c2')
def test_drop_empty_revs(self):
env = self.env
# Revision 1
env.mkdir('a')
env.add_file('a/bla', 'xxx')
env.commit('c1')
# Revision 2
env.mkdir('b')
env.commit('c2')
# Revision 3
env.change_file('a/bla', 'yyy')
env.commit('c3')
env.filter_repo( [ 'a' ] )
self.check_log_of_file_in_rev('a/bla', 2, [ [ 2, 'c3' ], [ 1, 'c1' ] ])
self.assertEquals(env.get_log_of_revision(2), 'c3')
def test_start_rev(self):
env = self.env
# Revision 1
env.mkdir('a')
env.add_file('a/bla', 'xxx')
env.commit('c1')
# Revision 2
env.change_file('a/bla', 'yyy')
env.commit('c2')
# Revision 3
env.change_file('a/bla', 'zzz')
env.commit('c3')
env.filter_repo( [ '--start-rev', '2', 'a' ] )
self.assertEquals(env.get_file_content_in_rev('a/bla', 1), 'yyy', 'File a/bla correct in rev 1')
self.check_log_of_file_in_rev('a/bla', 1, [ [ 1, 'svnfiltereddump boots trap revision' ] ])
self.assertEquals(env.get_file_content_in_rev('a/bla', 2), 'zzz', 'File a/bla correct in rev 2')
self.check_log_of_file_in_rev('a/bla', 2, [ [ 2, 'c3' ], [ 1, 'svnfiltereddump boots trap revision' ] ])
def |
medialab/ANTA2 | anta/readers/unfccc_reader.py | Python | lgpl-3.0 | 4,367 | 0.003664 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# coding=utf-8
import csv
import logging
import os
from anta.util import config
from anta.annotators import pattern_annotator
from anta.annotators import tika_annotator
from anta.storage.solr_client import SOLRInterface
def read():
data_dir = config.config["data_dir"]
# 1991-2003
unfccc_csv_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_1991-2003.tar/unfccc_adaptation_metadatas.csv"
unfccc_files_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_1991-2003.tar/pdfs"
read_collection(unfccc_csv_path, unfccc_files_path)
# 1991-2012
unfccc_csv_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_1991-2012.t | ar/unfccc_adaptation_metadatas.csv"
unfccc_files_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_1991-2012.tar/pdfs"
read_collection(unfccc_csv_path, unfccc | _files_path)
# 2003-2012
unfccc_csv_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_2003-2012.tar/unfccc_adaptation_metadatas.csv"
unfccc_files_path = data_dir + "/UNFCCC Documents/unfccc_adaptation_2003-2012.tar/pdfs"
read_collection(unfccc_csv_path, unfccc_files_path)
def read_collection(csv_path, files_path):
logging.info("unfccc_reader : Starting read_collection: {}".format(csv_path))
count = 0
documents = convert_csv(csv_path)
si = SOLRInterface()
for document in documents:
if "file_name" not in document:
logging.error("Empty file_name !")
else:
file_path = os.path.join(files_path, document["file_name"])
if os.path.exists(file_path):
#logging.info("id: {}".format(document["id"]))
meta, text = tika_annotator.extract_all(file_path)
if meta and text:
count += 1
if "xmpTPg:NPages" in meta:
document["extent_pages"] = meta["xmpTPg:NPages"]
document["text"] = text
document["text_anta"] = pattern_annotator.extract_text_pos_tags(text, "en", ["NP"])
# index in solr
si.add_document(document)
else:
logging.error("Missing file: {}".format(file_path))
si.commit()
logging.info("unfccc_reader : Finishing read_collection: count: {}".format(count))
def convert_csv(csv_path):
with open(csv_path, 'rb') as csv_file:
# url|pdf_filename|symbol|title|authors|pdf_url|pdf_language|abstract|
# meeting|doctype|topics|keywords|countries|pubdate|year
csv_reader = csv.DictReader(csv_file, delimiter='|')
for row in csv_reader:
document = {}
document["corpus"] = "UNFCCC"
#logging.info("symbol: {}".format(row["symbol"]))
if row["url"]:
document["meta_url"] = row["url"].strip()
if row["pdf_filename"]:
document["file_name"] = row["pdf_filename"].strip()
if row["symbol"]:
document["id"] = "UN" + row["symbol"].replace("/","-").strip()
if row["title"]:
document["title"] = row["title"].strip()
if row["authors"]:
document["creators"] = [x.strip() for x in row["authors"].split(";")]
if row["pdf_url"]:
document["file_url"] = row["pdf_url"].strip()
if row["pdf_language"]:
document["language"] = row["pdf_language"].strip()
if row["abstract"]:
document["abstract"] = row["abstract"].strip()
if row["meeting"]:
document["meeting"] = row["meeting"].strip()
if row["doctype"]:
document["rec_type"] = row["doctype"].strip()
if row["topics"]:
document["topics"] = [x.strip() for x in row["topics"].split(";")]
if row["keywords"]:
document["keywords"] = [x.strip() for x in row["keywords"].split(";")]
if row["countries"]:
document["countries"] = [x.strip() for x in row["countries"].split(";")]
if row["pubdate"]:
document["date_issued"] = row["pubdate"].strip()
if row["year"]:
document["date_year"] = row["year"].strip()
logging.info(document)
yield document
|
khandavally/devstack | EPAQA/vf_allocation.py | Python | apache-2.0 | 4,020 | 0.012687 | from nova.db.sqlalchemy import api as model_api
from nova.db.sqlalchemy.models import PciDevice, Instance, ComputeNode
import collections
#, VFAllocation
session = model_api.get_session()
WORK_LOAD = ["cp","cr"]
def execute_vf_allocation(req_vf,los,req_work,bus_list, *args,**kwargs):
"""This method is called from nova.scheduler.filter_scheduler.FilterScheduler"""
base_dict = collections.OrderedDict()
get_bus_slot = session.query(PciDevice).from_statement("select id,bus,slot from pci_devices where status = :status GROUP BY bus, slot").params(status="available").all()
obj_list = [obj for obj in get_bus_slot if obj.bus in bus_list]
if not obj_list:
return []
""" CLEAR VF_ALLOCATION TABLE DATA """
session.execute("truncate vf_allocation")
""" Get list of PCI devices for Unique bus and slot (unassigned is optional) """
for obj in obj_list:
BUS = obj.bus
SLOT = obj.slot
cp_vf_assigned = []
for j in range(len(WORK_LOAD)):
""" Get the List of VF assigned for each Bus, Slot for workload cp and cr """
GET_ASS_VF = """select bus,slot,function,count(workload) as count_wl from pci_devices where bus = %s and slot = %s and workload = '%s' and status = 'allocated'""" % (BUS, SLOT, str(WORK_LOAD[j]))
cp_vf_ass = int(session.query("count_wl").from_statement(GET_ASS_VF).scalar())
cp_vf_assigned.append(cp_vf_ass)
""" Get the Policy value from the input """
los_ass_final = int(los)
""" Create obtained records as a dictionary """
base_dict[str(BUS)+":"+str(SLOT)] = [{'cp': cp_vf_assigned[0], 'cr': cp_vf_assigned[1]}]
""" VF Allocation Algorithm Logic"""
if (((req_vf % 2 == 0) and (req_work == "cp-cr")) or (req_work == "cp") or (req_work == "cr")):
result = VF_Allocation_Extended_Logic(req_vf,los,req_work,base_dict)
return result
else:
return []
def VF_Allocation_Extended_Logic(req_vf,los,req_work,base_dict):
address_list = []
address_workload_list = []
| tmp_add_store = ("")
REQ_VF = req_work
RESET_COUNT = 0
for k in range(req_vf):
if REQ_VF == "cp-cr" and ( req_vf / 2 != RESET_COUNT ):
req_work = 'cp'
RESET_COUNT = RESET_COUNT + 1
elif REQ_VF == "cp-cr" and ( req_vf / 2 <= RESET_COUNT ):
req_work = 'cr'
filter_data = {k: v for k, v in base_dict.iteritems() if v[0][req_work] < l | os} # Filter the Bus slot having vfs less than los value for selected workload
if req_work == 'cp':
final_list = sorted(filter_data, key=lambda x: (filter_data[x][0]['cp'], filter_data[x][0]['cr'])) # sort the filtered dict based on cp cr count
else:
final_list = sorted(filter_data, key=lambda x: (filter_data[x][0]['cr'], filter_data[x][0]['cp'])) # sort the filtered dict based on cp cr count
if len(final_list) >= 1:
selected_bus_slot = final_list.sort() # Get last bus slot for PCI Instnace request
selected_bus_slot = final_list[-1]
else:
selected_bus_slot = ""
if selected_bus_slot:
bus_,slot_ = selected_bus_slot.split(":")
address = [ad[0] for ad in session.query("address").from_statement("select address from pci_devices where bus = %s and slot = %s and status='available' and function <> 0" % (bus_,slot_)).all() if ad[0] not in address_list]
if address:
address_list.append(address[0])
address_workload_list.append((address[0],req_work))
base_dict[selected_bus_slot][0][req_work] = base_dict[selected_bus_slot][0][req_work] + 1 # Update the vfs count for selected bus,slot with requested workload
else:
break;
if len(address_list) != req_vf:
return []
return address_workload_list
|
domino14/Webolith | djAerolith/wordwalls/apps.py | Python | gpl-3.0 | 96 | 0 | fro | m django.ap | ps import AppConfig
class WordwallsAppConfig(AppConfig):
name = 'wordwalls'
|
livef1/Livef1-web | src/comment.py | Python | gpl-2.0 | 3,104 | 0.02674 | #
# livef1
#
# f1comment.py - classes to store the live F1 comments
#
# Copyright (c) 2014 Marc Bertens <marc.bertens@pe2mbs.nl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Special thanks to the live-f1 project 'https://launchpad.net/live-f1'
# * Scott James Remnant
# * Dave Pusey
#
# For showing the way of program logic.
#
import logging
import time
import datetime
__version__ = "0.1"
__applic__ = "Live F1 Web"
__author__ = "Marc Bertens"
class F1Text( object ):
def __init__( self, ts = 0, c = '', t = '' ):
self.timestamp = ts
self.clock = c
self.text = t
return
def reset( self ):
self.timestamp = 0
self.clock = ""
self.text = ""
return
class F1Commentary( object ):
def __init__( self, log ):
self.lines = []
self.log = log
return
def reset( self ):
self.lines = []
return
def gethtml( self, div_tag_name ):
output = ""
for elem in self.lines:
if elem.clock:
sep = "-"
else:
| sep = ""
#endif
output = "<tr valign='top'><td>%s</td><td>%s</td><td>%s</td></tr>" % (
elem.clock, sep, elem.text ) + output
return """<div class="%s"><table>%s</table></div>""" % ( div_tag_name, output )
def append( self, new ):
#self.log.info( "Commentary.time : %i" % ( new.timestamp ) )
#self.log.info( "Commentary.text : %s" % ( new.text ) )
if not new. | clock:
secs = new.timestamp % 60
mins = new.timestamp // 60
hours = 0
if ( mins > 60 ):
hours = mins // 60
mins = mins % 60
# endif
# add time stamp
new.clock = "%02i:%02i" % ( hours, mins )
self.lines.append( F1Text( new.timestamp, new.clock, new.text ) )
return
def dump( self ):
for elem in self.lines:
self.log.info( "Commentary : %s" % ( elem.text ) )
# next
return
comment = None
def GetCommentary():
global comment
if comment == None:
comment = F1Commentary( logging.getLogger( 'live-f1' ) )
return comment
# end def
|
cropr/bjk2017 | cd_subscription/migrations/0006_cdsubscription_badgemimetype.py | Python | apache-2.0 | 478 | 0.002092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migratio | ns.Migration):
dependencies = [
('cd_subscription', '0005_auto_20161125_1908'),
]
operations = [
migrations.AddField(
model_name='cdsubscription',
name='badgemimetype',
field=models.CharField(default='', max_length=20, blank=True, ve | rbose_name='Badge mimetype'),
),
]
|
chrsrds/scikit-learn | sklearn/tree/tests/test_tree.py | Python | bsd-3-clause | 68,270 | 0.000059 | """
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import pytest
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import TempMemmap
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, | 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
| [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
# NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25,
random_state=0).toarray()
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert s.node_count == d.node_count, (
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.full(len(X), 0.5))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T |
gentnerlab/pyoperant | pyoperant/local_zog.py | Python | bsd-3-clause | 7,782 | 0.006682 | from pyoperant import hwio, components, panels, utils
from pyoperant.interfaces import comedi_, pyaudio_
from pyoperant import InterfaceError
import time
_ZOG_MAP = {
1: ('/dev/comedi0', 2, 0, 2, 8), # box_id:(subdevice,in_dev,in_chan,out_dev,out_chan)
2: ('/dev/comedi0', 2, 4, 2, 16),
3: ('/dev/comedi0', 2, 24, 2, 32),
4: ('/dev/comedi0', 2, 28, 2, 40),
5: ('/dev/comedi0', 2, 48, 2, 56),
6: ('/dev/comedi0', 2, 52, 2, 64),
7: ('/dev/comedi0', 2, 72, 2, 80),
8: ('/dev/comedi0', 2, 76, 2, 88),
9: ('/dev/comedi1', 2, 0, 2, 8),
10: ('/dev/comedi1', 2, 4, 2, 16),
11: ('/dev/comedi1', 2, 24, 2, 32),
12: ('/dev/comedi1', 2, 28, 2, 40),
13: ('/dev/comedi1', 2, 48, 2, 56),
14: ('/dev/comedi1', 2, 52, 2, 64),
15: ('/dev/comedi1', 2, 72, 2, 80),
16: ('/dev/comedi1', 2, 76, 2, 88),
}
dev_name_fmt = 'Adapter 1 (5316) - Output Stream %i'
class ZogAudioInterface(pyaudio_.PyAudioInterface):
"""docstring for ZogAudioInterface"""
def __init__(self, *args, **kwargs):
super(ZogAudioInterface, self).__init__(*args,**kwargs)
def validate(self):
super(ZogAudioInterface, self).validate()
if self.wf.getframerate()==48000:
return True
else:
raise InterfaceError('this wav file must be 48kHz')
class ZogPanel(panels.BasePanel):
"""class for zog boxes """
def __init__(self,id=None, *args, **kwargs):
super(ZogPanel, self).__init__(*args, **kwargs)
self.id = id
# define interfaces
self.interfaces['comedi'] = comedi_.ComediInterface(device_name=_ZOG_MAP[self.id][0])
self.interfaces['pyaudio'] = ZogAudioInterface(device_name= (dev_name_fmt % self.id))
# define inputs
for in_chan in [ii+_ZOG_MAP[self.id][2] for ii in range(4)]:
self.inputs.append(hwio.BooleanInput(interface=self.interfaces['comedi'],
params = {'subdevice': _ZOG_MAP[self.id][1],
'channel': in_chan |
},
)
)
for out_chan in [ii+_ZOG_MAP[self.id][4] for ii in range(8)]:
self.outputs.append(hwio.BooleanOutput(interface=self.interfaces['comedi'],
params = {'subdevice': _ZOG_MAP[self.id][3],
'channel': out_chan
| },
)
)
self.speaker = hwio.AudioOutput(interface=self.interfaces['pyaudio'])
# assemble inputs into components
self.left = components.PeckPort(IR=self.inputs[0],LED=self.outputs[0])
self.center = components.PeckPort(IR=self.inputs[1],LED=self.outputs[1])
self.right = components.PeckPort(IR=self.inputs[2],LED=self.outputs[2])
self.house_light = components.HouseLight(light=self.outputs[3])
self.hopper = components.Hopper(IR=self.inputs[3],solenoid=self.outputs[4])
# define reward & punishment methods
self.reward = self.hopper.reward
self.punish = self.house_light.punish
def reset(self):
for output in self.outputs:
output.write(False)
self.house_light.on()
self.hopper.down()
# self.speaker.stop()
def test(self):
self.reset()
dur = 2.0
for output in self.outputs:
output.write(True)
utils.wait(dur)
output.write(False)
self.reset()
self.reward(value=dur)
self.punish(value=dur)
self.speaker.queue('/usr/local/stimuli/test48k.wav')
self.speaker.play()
time.sleep(1.0)
self.speaker.stop()
return True
class Zog1(ZogPanel):
"""Zog1 panel"""
def __init__(self):
super(Zog1, self).__init__(id=1)
class Zog2(ZogPanel):
"""Zog2 panel"""
def __init__(self):
super(Zog2, self).__init__(id=2)
class Zog3(ZogPanel):
"""Zog3 panel"""
def __init__(self):
super(Zog3, self).__init__(id=3)
class Zog4(ZogPanel):
"""Zog4 panel"""
def __init__(self):
super(Zog4, self).__init__(id=4)
class Zog6(ZogPanel):
"""Zog6 panel"""
def __init__(self):
super(Zog6, self).__init__(id=6)
class Zog8(ZogPanel):
"""Zog8 panel"""
def __init__(self):
super(Zog8, self).__init__(id=8)
class Zog10(ZogPanel):
"""Zog10 panel"""
def __init__(self):
super(Zog10, self).__init__(id=10)
class Zog12(ZogPanel):
"""Zog12 panel"""
def __init__(self):
super(Zog12, self).__init__(id=12)
class Zog13(ZogPanel):
"""Zog13 panel"""
def __init__(self):
super(Zog13, self).__init__(id=13)
class Zog14(ZogPanel):
"""Zog14 panel"""
def __init__(self):
super(Zog14, self).__init__(id=14)
class Zog15(ZogPanel):
"""Zog15 panel"""
def __init__(self):
super(Zog15, self).__init__(id=15)
class Zog16(ZogPanel):
"""Zog16 panel"""
def __init__(self):
super(Zog16, self).__init__(id=16)
# define the panels with cue lights
class ZogCuePanel(ZogPanel):
"""ZogCuePanel panel"""
def __init__(self,id=None):
super(ZogCuePanel, self).__init__(id=id)
for out_chan in [ii+_ZOG_MAP[self.id][4] for ii in range(5,8)]:
self.outputs.append(hwio.BooleanOutput(interface=self.interfaces['comedi'],
params = {'subdevice': _ZOG_MAP[self.id][3],
'channel': out_chan
},
)
)
self.cue = components.RGBLight(red=self.outputs[7],
green=self.outputs[5],
blue=self.outputs[6],
name='cue')
class Zog5(ZogCuePanel):
"""Zog5 panel"""
def __init__(self):
super(Zog5, self).__init__(id=5)
class Zog7(ZogCuePanel):
"""Zog7 panel"""
def __init__(self):
super(Zog7, self).__init__(id=7)
class Zog9(ZogCuePanel):
"""Zog9 panel"""
def __init__(self):
super(Zog9, self).__init__(id=9)
class Zog11(ZogCuePanel):
"""Zog11 panel"""
def __init__(self):
super(Zog11, self).__init__(id=11)
class Zog10(ZogCuePanel):
"""Zog10 panel"""
def __init__(self):
super(Zog10, self).__init__(id=10)
class Zog12(ZogCuePanel):
"""Zog12 panel"""
def __init__(self):
super(Zog12, self).__init__(id=12)
# in the end, 'PANELS' should contain each operant panel available for use
PANELS = {
"1": Zog1,
"2": Zog2,
"3": Zog3,
"4": Zog4,
"5": Zog5,
"6": Zog6,
"7": Zog7,
"8": Zog8,
"9": Zog9,
"10": Zog10,
"11": Zog11,
"12": Zog12,
"13": Zog13,
"14": Zog14,
"15": Zog15,
"16": Zog16,
}
BEHAVIORS = ['pyoperant.behavior',
'glab_behaviors'
]
DATA_PATH = '/home/bird/opdat/'
# SMTP_CONFIG
DEFAULT_EMAIL = 'justin.kiggins@gmail.com'
SMTP_CONFIG = {'mailhost': 'localhost',
'toaddrs': [DEFAULT_EMAIL],
'fromaddr': 'Zog <bird@zog.ucsd.edu>',
'subject': '[pyoperant notice] on zog',
'credentials': None,
'secure': None,
}
|
skobz/Python | Find_Duplicates.py | Python | mit | 651 | 0.019969 | # Name: Pre-Logic Script C | ode
# Created: April, 27, 2015
# Author: Sven Koberwitz
# Purpose: Find the number of occurrences of a value in a Field using Field Calculator.
## Pre-Logic Script Code
import arcpy
uniqueList = {}
## Set the name of the feature class here
fc = "feature_class"
rows = arcpy.SearchCursor(fc)
for row in rows:
## Set the name of the attribute here
value = row. | getValue("field_name")
if value not in uniqueList:
uniqueList[value] = 1
else:
uniqueList[value] = uniqueList[value] + 1
def duplicates(inValue):
return uniqueList[inValue]
## Use this as the calculation formula
duplicates(!field_name!)
|
leoc/home-assistant | homeassistant/components/conversation.py | Python | mit | 2,231 | 0 | """
Support for functionality to have conversations with Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/conversation/
"""
import logging
import re
import warnings
import voluptuous as vol
from homeassistant import core
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['fuzzywuzzy==0.12.0']
ATTR_TEXT = 'text'
DOMAIN = 'conversation'
REGEX_TURN_COMMAND = re.compile(r'turn (?P<name>(?: |\w)+) (?P<command>\w+)')
SERVICE_PROCESS = 'process'
SERVICE_PROCESS_SCHEMA = vol.Schema({
vol.Required(ATTR_TEXT): vol.All(cv.string, vol.Lower),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Register the `conversation.process` service.

    Returns True so Home Assistant marks the component as loaded.
    """
    warnings.filterwarnings('ignore', module='fuzzywuzzy')
    from fuzzywuzzy import process as fuzzyExtract
    logger = logging.getLogger(__name__)

    def process(service):
        """Parse free text into a turn-on/turn-off service call."""
        text = service.data[ATTR_TEXT]
        match = REGEX_TURN_COMMAND.match(text)

        if not match:
            logger.error("Unable to process: %s", text)
            return

        name, command = match.groups()
        entities = {state.entity_id: state.name for state in hass.states.all()}

        # extractOne() returns None when no candidate reaches score_cutoff;
        # the original indexed the result unconditionally, raising TypeError
        # and making the "could not find entity" branch unreachable.
        extraction = fuzzyExtract.extractOne(
            name, entities, score_cutoff=65)
        if extraction is None:
            logger.error(
                "Could not find entity id %s from text %s", name, text)
            return
        # With a dict of choices, extractOne returns (value, score, key);
        # the key is the entity_id.
        entity_ids = extraction[2]

        if command == 'on':
            hass.services.call(core.DOMAIN, SERVICE_TURN_ON, {
                ATTR_ENTITY_ID: entity_ids,
            }, blocking=True)
        elif command == 'off':
            hass.services.call(core.DOMAIN, SERVICE_TURN_OFF, {
                ATTR_ENTITY_ID: entity_ids,
            }, blocking=True)
        else:
            logger.error('Got unsupported command %s from text %s',
                         command, text)

    hass.services.register(
        DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA)
    return True
|
CompPhysics/ComputationalPhysics2 | doc/LectureNotes/_build/jupyter_execute/logisticregression.py | Python | cc0-1.0 | 22,266 | 0.008758 | #!/usr/bin/env python
# coding: utf-8
# # Logistic Regression
#
# ## Introduction
#
# In linear regression our main interest was centered on learning the
# coefficients of a functional fit (say a polynomial) in order to be
# able to predict the response of a continuous variable on some unseen
# data. The fit to the continuous variable $y_i$ is based on some
# independent variables $\hat{x}_i$. Linear regression resulted in
# analytical expressions for standard ordinary Least Squares or Ridge
# regression (in terms of matrices to invert) for several quantities,
# ranging from the variance and thereby the confidence intervals of the
# parameters $\hat{\beta}$ to the mean squared error. If we can invert
# the product of the design matrices, linear regression gives then a
# simple recipe for fitting our data.
#
#
# Classification problems, however, are concerned with outcomes taking
# the form of discrete variables (i.e. categories). We may for example,
# on the basis of DNA sequencing for a number of patients, like to find
# out which mutations are important for a certain disease; or based on
# scans of various patients' brains, figure out if there is a tumor or
# not; or given a specific physical system, we'd like to identify its
# state, say whether it is an ordered or disordered system (typical
# situation in solid state physics); or classify the status of a
# patient, whether she/he has a stroke or not and many other similar
# situations.
#
# The most common situation we encounter when we apply logistic
# regression is that of two possible outcomes, normally denoted as a
# binary outcome, true or false, positive or negative, success or
# failure etc.
#
#
#
# Logistic regression will also serve as our stepping stone towards
# neural network algorithms and supervised deep learning. For logistic
# learning, the minimization of the cost function leads to a non-linear
# equation in the parameters $\hat{\beta}$. The optimization of the
# problem calls therefore for minimization algorithms. This forms the
# bottle neck of all machine learning algorithms, namely how to find
# reliable minima of a multi-variable function. This leads us to the
# family of gradient descent methods. The latter are the working horses
# of basically all modern machine learning algorithms.
#
# We note also that many of the topics discussed here on logistic
# regression are also commonly used in modern supervised Deep Learning
# models, as we will see later.
#
#
#
# ## Basics
#
# We consider the case where the dependent variables, also called the
# responses or the outcomes, $y_i$ are discrete and only take values
# from $k=0,\dots,K-1$ (i.e. $K$ classes).
#
# The goal is to predict the
# output classes from the design matrix $\hat{X}\in\mathbb{R}^{n\times p}$
# made of $n$ samples, each of which carries $p$ features or predictors. The
# primary goal is to identify the classes to which new unseen samples
# belong.
#
# Let us specialize to the case of two classes only, with outputs
# $y_i=0$ and $y_i=1$. Our outcomes could represent the status of a
# credit card user that could default or not on her/his credit card
# debt. That is
# $$
# y_i = \begin{bmatrix} 0 & \mathrm{no}\\ 1 & \mathrm{yes} \end{bmatrix}.
# $$
# Before moving to the logistic model, let us try to use our linear
# regression model to classify these two outcomes. We could for example
# fit a linear model to the default case if $y_i > 0.5$ and the no
# default case $y_i \leq 0.5$.
#
# We would then have our
# weighted linear combination, namely
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# \hat{y} = \hat{X}^T\hat{\beta} + \hat{\epsilon},
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# where $\hat{y}$ is a vector representing the possible outcomes, $\hat{X}$ is our
# $n\times p$ design matrix and $\hat{\beta}$ represents our estimators/predictors.
#
#
# The main problem with our function is that it takes values on the
# entire real axis. In the case of logistic regression, however, the
# labels $y_i$ are discrete variables. A typical example is the credit
# card data discussed below here, where we can set the state of
# defaulting the debt to $y_i=1$ and not to $y_i=0$ for one the persons
# in the data set (see the full example below).
#
# One simple way to get a discrete output is to have sign
# functions that map the output of a linear regressor to values $\{0,1\}$,
# $f(s_i)=sign(s_i)=1$ if $s_i\ge 0$ and 0 if otherwise.
# We will encounter this model in our first demonstration of neural networks. Historically it is called the ``perceptron" model in the machine learning
# literature. This model is extremely simple. However, in many cases it is more
# favorable to use a ``soft" classifier that outputs
# the probability of a given category. This leads us to the logistic function.
#
#
# The following example on data for coronary heart disease (CHD) as function of age may serve as an illustration. In the code here we read and plot whether a person has had CHD (output = 1) or not (output = 0), against that person's age. Clearly, the figure shows that attempting to make a standard linear regression fit may not be very meaningful.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
from IPython.display import display
# NOTE(review): re-importing plt/mpl from pylab shadows the matplotlib
# imports above — presumably only done for the style setup; confirm.
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
    os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
    os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
    os.makedirs(DATA_ID)
def image_path(fig_id):
    # Path for a figure file inside FIGURE_ID.
    return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
    # Path for a data file inside DATA_ID.
    return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
    # Save the current matplotlib figure as PNG under FIGURE_ID.
    plt.savefig(image_path(fig_id) + ".png", format='png')
# NOTE(review): this handle is never closed; pd.read_csv accepts a path
# directly, which would avoid the leak.
infile = open(data_path("chddata.csv"),'r')
# Read the chd data as csv file and organize the data into arrays with age group, age, and chd
chd = pd.read_csv(infile, names=('ID', 'Age', 'Agegroup', 'CHD'))
chd.columns = ['ID', 'Age', 'Agegroup', 'CHD']
output = chd['CHD']
age = chd['Age']
agegroup = chd['Agegroup']
numberID = chd['ID']
display(chd)
# Scatter the binary CHD outcome against age.
plt.scatter(age, output, marker='o')
plt.axis([18,70.0,-0.1, 1.2])
plt.xlabel(r'Age')
plt.ylabel(r'CHD')
plt.title(r'Age distribution and Coronary heart disease')
plt.show()
# What we could attempt however is to plot the mean value for each group.
# In[ ]:
# Empirical CHD fraction per age group (hard-coded from the data above).
agegroupmean = np.array([0.1, 0.133, 0.250, 0.333, 0.462, 0.625, 0.765, 0.800])
group = np.array([1, 2, 3, 4, 5, 6, 7, 8])
plt.plot(group, agegroupmean, "r-")
plt.axis([0,9,0, 1.0])
plt.xlabel(r'Age group')
plt.ylabel(r'CHD mean values')
plt.title(r'Mean values for each age group')
plt.show()
# We are now trying to find a function $f(y\vert x)$, that is a function which gives us an expected value for the output $y$ with a given input $x$.
# In standard linear regression with a linear dependence on $x$, we would write this in terms of our model
# $$
# f(y_i\vert x_i)=\beta_0+\beta_1 x_i.
# $$
# This expression implies however that $f(y_i\vert x_i)$ could take any
# value from minus infinity to plus infinity. If we however let
# $f(y\vert y)$ be represented by the mean value, the above example
# shows us that we can constrain the function to take values between
# zero and one, that is we have $0 \le f(y_i\vert x_i) \le 1$. Looking
# at our last curve we see also that it has an S-shaped form. This leads
# us to a very popular model for the function $f$, namely the so-called
# Sigmoid function or logistic model. We will consider this function as
# representing the probability for finding a value of $y_i$ with a given
# $x_i$.
#
#
# ## The logistic f |
archifix/settings | sublime/Packages/backrefs/tools/unipropgen.py | Python | mit | 44,164 | 0.001404 | """
Generate a Unicode prop table for Python narrow and wide builds.
Narrow builds will stop at `0xffff`.
"""
from __future__ import unicode_literals
import sys
import struct
import unicodedata
import codecs
import os
import re
__version__ = '4.2.0'
# Unicode database version shipped with the running interpreter.
UNIVERSION = unicodedata.unidata_version
UNIVERSION_INFO = tuple([int(x) for x in UNIVERSION.split('.')])
HOME = os.path.dirname(os.path.abspath(__file__))
MAXUNICODE = sys.maxunicode
MAXASCII = 0xFF
# True on narrow (UCS-2) builds, where code points stop at 0xFFFF.
NARROW = sys.maxunicode == 0xFFFF
# Characters that must be escaped inside generated character classes.
GROUP_ESCAPES = frozenset([ord(x) for x in '-&[\\]^|~'])
# Compatibility
PY3 = sys.version_info >= (3, 0) and sys.version_info[0:2] < (4, 0)
PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY37 = sys.version_info >= (3, 7)
if NARROW:
    UNICODE_RANGE = (0x0000, 0xFFFF)
else:
    UNICODE_RANGE = (0x0000, 0x10FFFF)
ASCII_RANGE = (0x00, 0xFF)
if PY3:
    unichar = chr  # noqa
else:
    unichar = unichr  # noqa
# Full universes used to compute complements of property tables.
ALL_CHARS = set([x for x in range(UNICODE_RANGE[0], UNICODE_RANGE[1] + 1)])
ALL_ASCII = set([x for x in range(ASCII_RANGE[0], ASCII_RANGE[1] + 1)])
# Header emitted at the top of every generated table module.
HEADER = '''\
"""Unicode Properties from Unicode version %s (autogen)."""
from __future__ import unicode_literals
''' % UNIVERSION
def uchr(i):
    """Allow getting Unicode character on narrow Python builds."""
    try:
        # unichar is chr on Py3 / unichr on Py2 (bound in module setup).
        return unichar(i)
    except ValueError:
        # Narrow build: build the astral character via a UTF-32 round trip.
        return struct.pack('i', i).decode('utf-32')
def uniformat(value):
    """Return code point *value* as an escaped character for a pattern string."""
    if value in GROUP_ESCAPES:
        # Escape characters that are (or may become) problematic in classes:
        # emit a literal backslash before the byte escape.
        return "\\x%02x\\x%02x" % (0x5c, value)
    if value <= 0xFF:
        return "\\x%02x" % value
    if value <= 0xFFFF:
        return "\\u%04x" % value
    return "\\U%08x" % value
def format_name(text):
    """Normalize a property name: lower-cased, stripped, with spaces,
    hyphens and underscores removed."""
    normalized = text.strip().lower()
    for separator in (' ', '-', '_'):
        normalized = normalized.replace(separator, '')
    return normalized
def binaryformat(value):
    """Return byte *value* as an escaped byte for a pattern string."""
    # Problematic class characters get a literal backslash prepended.
    prefix = "\\x%02x" % 0x5c if value in GROUP_ESCAPES else ""
    return prefix + ("\\x%02x" % value)
def create_span(unirange, binary=False):
    """Clamp *unirange* to the build's maximum and expand it to a list of
    code points.

    Returns ``None`` when the whole range lies above the maximum.
    Note: mutates *unirange* in place (single values become pairs).
    """
    if len(unirange) < 2:
        unirange.append(unirange[0])
    if binary:
        if unirange[0] > MAXASCII:
            return None
        unirange[1] = min(unirange[1], MAXASCII)
    elif NARROW:
        if unirange[0] > MAXUNICODE:
            return None
        unirange[1] = min(unirange[1], MAXUNICODE)
    return list(range(unirange[0], unirange[1] + 1))
def not_explicitly_defined(table, name, binary=False):
    """Add to ``table[name]`` every code point not covered by any entry.

    The complement is taken against the full ASCII or Unicode universe and
    merged (deduplicated) with any existing values under *name*.
    """
    universe = ALL_ASCII if binary else ALL_CHARS
    covered = set()
    for values in table.values():
        covered.update(values)
    leftover = universe - covered
    if name in table:
        table[name] = list(set(table[name]) | leftover)
    else:
        table[name] = list(leftover)
def char2range(d, binary=False, invert=True):
    """Convert the characters in the dict to a range in string form.

    Each list of code points in *d* (recursing into nested dicts) is
    replaced in place by a compact range string such as ``\\x41-\\x5a``.
    When *invert* is true, a complementary entry is also stored under the
    key with a leading ``^`` added (or stripped when already present).
    """
    fmt = binaryformat if binary else uniformat
    maxrange = MAXASCII if binary else MAXUNICODE
    for k1 in sorted(d.keys()):
        v1 = d[k1]
        if not isinstance(v1, list):
            # Nested table: convert its entries recursively.
            char2range(v1, binary=binary, invert=invert)
        else:
            inverted = k1.startswith('^')
            v1.sort()
            # first/last track the current run of consecutive code points;
            # ifirst/ilast track the gaps, which become the inverted ranges.
            last = None
            first = None
            ilast = None
            ifirst = None
            v2 = []
            iv2 = []
            if v1 and v1[0] != 0:
                ifirst = 0
            for i in v1:
                if first is None:
                    first = i
                    last = i
                elif i == last + 1:
                    last = i
                elif first is not None:
                    # Run ended: flush it and the gap that preceded it.
                    if first == last:
                        v2.append(fmt(first))
                    else:
                        v2.append("%s-%s" % (fmt(first), fmt(last)))
                    if invert and ifirst is not None:
                        ilast = first - 1
                        if ifirst == ilast:
                            iv2.append(fmt(ifirst))
                        else:
                            iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
                    ifirst = last + 1
                    first = i
                    last = i
            if not v1:
                # Empty list: the inverse covers the whole range.
                iv2 = ["%s-%s" % (fmt(0), fmt(maxrange))]
            elif first is not None:
                # Flush the final run and its preceding gap.
                if first == last:
                    v2.append(fmt(first))
                else:
                    v2.append("%s-%s" % (fmt(first), fmt(last)))
                if invert and ifirst is not None:
                    ilast = first - 1
                    if ifirst == ilast:
                        iv2.append(fmt(ifirst))
                    else:
                        iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
                ifirst = last + 1
            if invert and ifirst <= maxrange:
                # Trailing gap from the last run up to the maximum code point.
                ilast = maxrange
                if ifirst == ilast:
                    iv2.append(fmt(ifirst))
                else:
                    iv2.append("%s-%s" % (fmt(ifirst), fmt(ilast)))
            d[k1] = ''.join(v2)
            if invert:
                d[k1[1:] if inverted else '^' + k1] = ''.join(iv2)
def gen_blocks(output, ascii_props=False, append=False, prefix=""):
"""Generate Unicode blocks."""
with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
if not append:
f.write(HEADER)
f.write('%s_blocks = {' % prefix)
no_block = []
last = -1
max_range = MAXASCII if ascii_props else MAXUNICODE
formatter = binaryformat if ascii_props else uniformat
with open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'Blocks.txt'), 'r') as uf:
for line in uf:
if not line.startswith('#'):
data = line.split(';')
if len(data) < 2:
continue
block = [int(i, 16) for i in data[0].strip().split('..')]
if block[0] > last + 1:
if (last + 1) <= max_range:
endval = block[0] - 1 if (block[0] - 1) < max_range else max_range
no_block.append((last + 1, endval))
last = block[1]
name = format_name(data[1])
inverse_range = []
if block[0] > max_range:
if ascii_props:
f.write('\n "%s": "",' % name)
f.write('\n "^%s": "%s-%s",' % (name, formatter(0), formatter(max_range)))
continue
if block[0] > 0:
inverse_range.append("%s-%s" % (formatter(0), formatter(block[0] - 1)))
if block[1] < max_range:
inverse_range.append("%s-%s" % (formatter(block[1] + 1), formatter(max_range)))
f.write('\n "%s": "%s-%s",' % (name, formatter(block[0]), formatter(block[1])))
f.write('\n "^%s": "%s",' % (name, ''.join(inverse_range)))
if last < max_range:
if (last + 1) <= max_range:
no_block.append((last + 1, max_range))
last = -1
no_block_inverse = []
if not no_block:
no_block_inverse.append((0, max_range))
else:
for piece in no_block:
if piece[0] > last + 1:
no_block_inverse.append((last + 1, piece[0] - 1))
last = piece[1]
for block, name in ((no_block, 'noblock'), (no_block_inverse, '^noblock')):
f.write('\n "%s": "' % name)
for piece in block:
if piece[0] == piece[1]:
f.write(formatter(piece[0]))
|
wT-/SpelunkyWadUtility | restore_original.py | Python | unlicense | 843 | 0.033215 | #!/usr/bin/env python
"""
Created on Aug 12, 2013
@author: wT
@version: 1.2
"""
from __future__ import print_function
import sys
sys.dont_writ | e_bytecode = True # It's just clutter for this small scripts
import shutil
import traceback
from unpack import get_wad_path, get_wix_path
if __name__ == '__main__':
    # Python 2 <-> 3 fix: prefer raw_input when it exists.
    try:
        input = raw_input
    except NameError:
        pass
    try:
        yesno = input(r"Overwrite both (potentially modified) .wad and .wix files with originals? [y/n]: ").lower()
        if yesno in ("y", "yes"):
            try:
                # Restore the pristine backups created by unpack.py.
                shutil.move(get_wad_path() + ".orig", get_wad_path())
                shutil.move(get_wix_path() + ".orig", get_wix_path())
            except (OSError, shutil.Error):
                # Narrowed from a bare except: a failed move means there is
                # nothing to restore.
                print("No .orig files to restore")
            else:
                print("Done")
        else:
            print("Not restoring")
    except SystemExit as e:
        print(e)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still exits.
        traceback.print_exc()
bobbybabra/codeGuild | zombieHS.py | Python | bsd-2-clause | 5,598 | 0.009646 | import random
class Character:
def __init__(self):
self.name = ""
self.life = 20
self.health = random.choice(0, life)
self.zombie_life = 10
self.zombie_health = random.choice(0, zombie_life)
def attack(self, zombie):
self.hit = self.health - self.zombie_health
zombie.life -= self.hit
if self.hit == 0:
print ("..like a nimble sloth, %s evades %s's attack." % (zombie.name, self.name))
else:
print ("%s inflicts debilitating damage on %s!!" % (self.name, zombie.name))
return zombie.health <= 0
class Zombie(Character):
    """A randomly named zombie whose health scales with the player's."""

    def __init__(self, player):
        Character.__init__(self)
        # The original used brackets on random.choice and then called the
        # misspelled name `rand_adj`; both raised at runtime, and the name
        # was built as a tuple instead of a string.
        adjective = random.choice(['wretched', 'filthy', 'disgusting', 'oozing'])
        self.name = "a " + adjective + " zombie"
        # random.choice(player.health) on an int was a TypeError; presumably
        # the intent was health up to the player's — TODO confirm.
        self.health = random.randint(1, max(1, player.health))
class Player(Character):
    """The human-controlled character.

    Keeps a combat state ('normal' or 'fight') and a reference to the
    zombie currently engaged, if any.  The handlers below are dispatched
    by menu().  This is a repaired version of the original class, which
    set `self.level` instead of `self.state`, called the undefined helpers
    `do_damage`/`tired`, defined attack() twice and mixed Python 2 print
    statements with Python 3 calls.
    """

    def __init__(self):
        Character.__init__(self)
        # Every handler reads self.state; the original only set self.level.
        self.state = 'normal'
        self.zombie = None
        self.health = 10
        self.health_max = 10

    def quit(self):
        """Give up; zeroing health ends the main loop."""
        print("The zombie virus has infected %s. You are now undead and crave brains." % self.name)
        self.health = 0

    def help(self):
        """List the available commands (keep in sync with menu())."""
        # The original read menu()'s local `Commands` dict — a NameError.
        print("quit, help, status, rest, look around, flee, attack")

    def status(self):
        print("%s's health: %d/%d" % (self.name, self.health, self.health_max))

    def weak(self):
        """Attrition: lose one health point, never dropping below 1."""
        print("%s is cold, hungry and tired." % self.name)
        self.health = max(1, self.health - 1)

    def rest(self):
        """Recover one health point unless a zombie interrupts."""
        if self.state != 'normal':
            print("keep moving %s, zombies coming in hot!" % self.name)
            self.zombie_attacks()
            return
        spot = random.choice(['under a desk', 'in a locker', 'in a closet'])
        print("%s hides %s and takes a breather." % (self.name, spot))
        if random.randint(0, 1):
            self.zombie = Zombie(self)
            print("%s is surprised by %s" % (self.name, self.zombie.name))
            self.state = 'fight'
            self.zombie_attacks()
        elif self.health < self.health_max:
            self.health += 1
        else:
            print("%s has hidden too long." % self.name)
            self.health -= 1

    def look_around(self):
        """Explore a random room, possibly triggering an encounter."""
        if self.state != 'normal':
            print("%s runs into %s" % (self.name, self.zombie.name))
            self.zombie_attacks()
            return
        room = random.choice(["gymnasium", "library", "metal shop", "cafeteria"])
        print("%s runs into the %s" % (self.name, room))
        if random.randint(0, 1):
            self.zombie = Zombie(self)
            print("%s encounters %s!" % (self.name, self.zombie.name))
            self.state = 'fight'
        elif random.randint(0, 1):
            self.weak()

    def flee(self):
        """Try to escape the current fight; just wander when not fighting."""
        if self.state != 'fight':
            print("%s runs down a corridor" % self.name)
            self.weak()
            return
        if random.randint(1, self.health + 5) > random.randint(1, self.zombie.health):
            print("%s flees from %s." % (self.name, self.zombie.name))
            self.zombie = None
            self.state = 'normal'
        else:
            print("%s couldn't escape from %s!" % (self.name, self.zombie.name))
            self.zombie_attacks()

    def attack(self):
        """Attack the engaged zombie; a kill may restore and raise health."""
        if self.state != 'fight':
            print("%s flails in the air like a twit." % self.name)
            self.weak()
            return
        # The original called the undefined self.do_damage(); use the
        # inherited Character.attack() instead.
        if Character.attack(self, self.zombie):
            print("%s decapitates %s!" % (self.name, self.zombie.name))
            self.zombie = None
            self.state = 'normal'
            # Original: random.choice(self.health) — TypeError on an int.
            if self.health < 10:
                self.health += 1
                self.health_max += 1
                print("%s is rejuvenated" % self.name)
        else:
            self.zombie_attacks()

    def zombie_attacks(self):
        """Let the engaged zombie strike back; dying ends the game."""
        if self.zombie.attack(self):
            print("%s's brains were devoured by %s!!!\nyou are undead and crave BRAINS!!\nunless you're a vegetarian then seek GRAINS!!" % (self.name, self.zombie.name))
def menu():
    """Run the game loop, dispatching prefix-matched commands to Player methods."""
    commands = {
        'quit': Player.quit,
        'help': Player.help,
        'status': Player.status,
        'rest': Player.rest,
        'look around': Player.look_around,
        'flee': Player.flee,
        'attack': Player.attack,
    }
    hero = Player()
    hero.name = input("What is your character's name? ")
    print("(type help to get a list of actions)\n")
    print("""When %s leaves homeroom, they notice
a strange stench in the air...
maybe we are dissecting a frog in biology today...""" % hero.name)
    # The original built `hero` but looped on an undefined name `p`.
    while hero.health > 0:
        args = input("> ").split()
        if not args:
            continue
        for name, action in commands.items():
            # Commands may be abbreviated to any leading prefix.
            if args[0] == name[:len(args[0])]:
                action(hero)
                break
        else:
            print("%s is confused, enter a command" % hero.name)


# Backstory from the original module (kept for flavor):
# living on the outskirts of a government national lab has its pros and
# cons.  When the kids in school say that a rogue virus has started to
# infect people and turn them into zombies, you laugh it off.

if __name__ == '__main__':
    # The original defined menu() but never invoked it.
    menu()
|
sgmap/openfisca-france | openfisca_france/scenarios.py | Python | agpl-3.0 | 2,063 | 0.00921 | # -*- coding: utf-8 -*-
import logging
log = logging.getLogger(__name__)
def init_single_entity(scenario, axes = None, enfants = None, famille = None, foyer_fiscal = None, menage = None, parent1 = None, parent2 = None, period = None):
    """Populate *scenario* with one family / tax household / menage built
    from the given individuals, then return it.

    Individuals without an 'id' get a generated identifier 'ind<index>'.
    """
    assert parent1 is not None
    children = enfants if enfants is not None else []

    famille_data = dict(famille) if famille is not None else {}
    foyer_data = dict(foyer_fiscal) if foyer_fiscal is not None else {}
    menage_data = dict(menage) if menage is not None else {}
    individus = {}

    for index, individu in enumerate([parent1, parent2] + children):
        if individu is None:
            continue
        ident = individu.get('id')
        if ident is None:
            individu = dict(individu)
            ident = 'ind{}'.format(index)
        individus[ident] = individu
        if index <= 1:
            # The first two slots are the parents / declarants.
            famille_data.setdefault('parents', []).append(ident)
            foyer_data.setdefault('declarants', []).append(ident)
            role = 'personne_de_reference' if index == 0 else 'conjoint'
            menage_data[role] = ident
        else:
            famille_data.setdefault('enfants', []).append(ident)
            foyer_data.setdefault('personnes_a_charge', []).append(ident)
            menage_data.setdefault('enfants', []).append(ident)

    test_data = {
        'period': period,
        'familles': {'f0': famille_data},
        'foyers_fiscaux': {'ff0': foyer_data},
        'menages': {'m0': menage_data},
        'individus': individus,
    }
    if axes:
        test_data['axes'] = axes
    scenario.init_from_dict(test_data)
    return scenario
|
deonwu/PyTEL | src/pytel/ast/_build_tables.py | Python | gpl-2.0 | 762 | 0.011811 | #-----------------------------------------------------------------
# pycparser: _build_tables.py
#
# A dummy for generating the lexing/parsing tables and and
# compiling them into .pyc for faster execution in optimized mode.
# Also generates AST code from th | e _c_ast.yaml configuration file.
#
# Copyright (C) 2008, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
# Generate c_ast.py from the _c_ast.yaml configuration.
from _ast_gen import ASTCodeGenerator
ast_gen = ASTCodeGenerator('_c_ast.yaml')
# Close the generated file deterministically; the original leaked the
# handle returned by open().
with open('c_ast.py', 'w') as ast_file:
    ast_gen.generate(ast_file)

import c_parser

# Generates the tables
c_parser.CParser(
    lex_optimize=True,
    yacc_debug=False,
    yacc_optimize=True)

# Load to compile into .pyc
import lextab
import yacctab
import c_ast
|
Trust-Code/trust-addons | trust_task_time_control/models/__init__.py | Python | agpl-3.0 | 1,625 | 0 | # -*- encoding: utf-8 -*-
###############################################################################
# | #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify | #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from . import project_task
from . import hr_attendance
from . import res_user
|
huyphan/pyyawhois | test/record/parser/test_response_whois_nic_af_status_registered.py | Python | mit | 2,584 | 0.002322 |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.af/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicAfStatusRegistered(object):
    """Autogenerated assertions for a registered .af domain (google.af).

    Regenerate with scripts/generate_tests.py; do not edit by hand.
    """
    def setUp(self):
        # NOTE(review): the open() handle is never closed; fix in the
        # generator script, not here.
        fixture_path = "spec/fixtures/responses/whois.nic.af/status_registered.txt"
        host = "whois.nic.af"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])
    def test_status(self):
        eq_(self.record.status, 'registered')
    def test_available(self):
        eq_(self.record.available, False)
    def test_domain(self):
        eq_(self.record.domain, "google.af")
    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 4)
        eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[0].name, "ns1.google.com")
        eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[1].name, "ns2.google.com")
        eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[2].name, "ns3.google.com")
        eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[3].name, "ns4.google.com")
    def test_registered(self):
        eq_(self.record.registered, True)
    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('2009-10-05 03:51:17 UTC'))
    def test_registrar(self):
        eq_(self.record.registrar.__class__.__name__, 'Registrar')
        eq_(self.record.registrar.id, None)
        eq_(self.record.registrar.name, "MarkMonitor")
        eq_(self.record.registrar.organization, None)
        eq_(self.record.registrar.url, "http://www.markmonitor.com")
    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2014-01-16 06:50:48 UTC'))
    def test_domain_id(self):
        eq_(self.record.domain_id, "345679-CoCCA")
    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2014-05-05 03:51:17 UTC'))
|
fbkarsdorp/mbmp | mbmp/datatypes.py | Python | gpl-3.0 | 2,861 | 0.003146 | ## Python implementation of MBMA (Van den Bosch & Daelemans 1999)
## Copyright (C) 2011 Folgert Karsdorp
## Author: Folgert Karsdorp <fbkarsdorp@gmail.com>
## URL: <http://github.com/fbkarsdorp/mbmp>
## For licence information, see LICENCE.TXT
class Morpheme(object):
    """A single morpheme: a surface token, its lemma and a part-of-speech tag."""

    def __init__(self, token='', pos=None, lemma=''):
        """Store the morpheme fields.

        Args:
            token (str): surface form of the morpheme.
            pos (str): part-of-speech category.
            lemma (str): lemma form; defaults to *token* when omitted.

        Example::

            >>> Morpheme('clapp', 'V', 'clap')
            Morpheme(token=clapp, lemma=clap, pos=V)
            >>> Morpheme('clapp', 'V')
            Morpheme(token=clapp, lemma=clapp, pos=V)
        """
        self.token = token
        self.pos = pos
        # An empty lemma defaults to the token form.
        self.lemma = lemma if lemma else token

    def __repr__(self):
        return 'Morpheme(token=%s, lemma=%s, pos=%s)' % (
            self.token, self.lemma, self.pos)

    def __str__(self):
        return self.pprint()

    def __eq__(self, other):
        if not isinstance(other, Morpheme):
            return False
        return ((self.token, self.lemma, self.pos) ==
                (other.token, other.lemma, other.pos))

    def __ne__(self, other):
        return not self.__eq__(other)

    def pprint(self, mrepr='tokens-and-lemmas'):
        """Return a string representation of the morpheme.

        Args:
            mrepr: one of 'tokens', 'lemmas' or 'tokens-and-lemmas'; the
                last renders ``token=lemma`` when the two forms differ
                (e.g. ``clapp=clap``) and just the token otherwise.  Any
                other value falls back to the token representation.
        """
        if mrepr == 'lemmas':
            return self.lemma
        if mrepr == 'tokens-and-lemmas' and self.lemma != self.token:
            return '='.join([self.token, self.lemma])
        return self.token
|
stackingfunctions/practicepython | src/02-even_odd.py | Python | mit | 415 | 0.004819 | number = int(input("Give me a number and I will tellyou if i | t is even or odd. Your number: "))
divisor = int(input("What divisor would you like to check? "))

# Parity of the number entered above.
if number % 2 == 0:
    print("%d is EVEN" % number)
else:
    print("%d is ODD" % number)

if number % 4 == 0:
    print("%d is evenly divisible by 4." % number)

# Guard against ZeroDivisionError when the user enters 0 as the divisor.
if divisor == 0:
    print("Cannot check divisibility by 0.")
elif number % divisor == 0:
    print("%d is evenly divisible by %d." % (number, divisor))
|
adaschevici/firebase-demo | python-back/api/firebase/firebase.py | Python | mit | 1,373 | 0.00437 | import pyrebase
import random
try:
    import constants
    # Pyrebase project configuration assembled from the private constants
    # module (kept out of version control).
    config = {
        "apiKey": constants.API_KEY,
        "authDomain": constants.AUTH_DOMAIN,
        "databaseURL": constants.DB_URL,
        "projectId": constants.PROJECT_ID,
        "storageBucket": constants.STORAGE_BUCKET,
        "messagingSenderId": constants.SENDER_ID
    }
    firebase = pyrebase.initialize_app(config)
    auth = firebase.auth()
except Exception:
    # NOTE(review): swallowing every error leaves `firebase`/`auth`
    # undefined when constants is missing — presumably deliberate so the
    # module can still be imported without credentials; confirm.
    pass
# Cached Firebase session dict; an 'idToken' key appears after first login.
user = {}
def _ensure_signed_in():
    """Sign in once and cache the session; later calls reuse the idToken.

    Extracted from firebase_insert_user/firebase_update_claps, which
    duplicated this logic.
    """
    global user
    if not user.get('idToken'):
        print("Logging in")
        user = auth.sign_in_with_email_and_password(
            constants.USER_EMAIL, constants.USER_PASSWORD)


def firebase_insert_user(user_id):
    """Create the record for *user_id* with zero claps and a random color."""
    _ensure_signed_in()
    # Get a reference to the database service
    db = firebase.database()
    data = {
        "claps": 0,
        "colorValue": random.randint(1, 51)
    }
    # Pass the user's idToken so the write is authenticated.
    db.child("users").child(user_id).set(data, user['idToken'])


def firebase_update_claps(user_id):
    """Increment the clap counter for *user_id* by one."""
    _ensure_signed_in()
    db = firebase.database()
    # NOTE(review): read-then-write is racy under concurrent clappers.
    claps = db.child("users").child(user_id).get(user['idToken']).val()['claps']
    db.child("users").child(user_id).update({"claps": claps + 1}, user['idToken'])
|
anhstudios/swganh | data/scripts/templates/object/tangible/furniture/all/shared_frn_all_throwpillow_med_s02.py | Python | mit | 465 | 0.047312 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### P | LEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the medium throw pillow (style 02)."""
    pillow = Tangible()
    pillow.template = "object/tangible/furniture/all/shared_frn_all_throwpillow_med_s02.iff"
    pillow.attribute_template_id = 6
    pillow.stfName("frn_n","frn_throwpillow")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return pillow
hzlf/openbroadcast.ch | app/rating/api/views.py | Python | gpl-3.0 | 1,458 | 0.002058 | # -*- coding: utf-8 -*-
import logging
from django.conf import settings
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import authentication_classes, permission_classes
from ..utils import get_remo | te_votes, update_remote_vote
# Remote rating-service endpoint and credentials (optional settings).
API_BASE_URL = getattr(settings, 'API_BASE_URL', None)
API_BASE_AUTH = getattr(settings, 'API_BASE_AUTH', None)
log = logging.getLogger(__name__)
# Channel layer used to broadcast updated vote totals to websocket groups.
channel_layer = get_channel_layer()
@api_view(['GET', 'POST'])
@authentication_classes((SessionAuthentication,))
@permission_classes((IsAuthenticatedOrReadOnly,))
def vote_detail(request, obj_ct, obj_pk):
    """GET the current votes for an object, or POST a vote and broadcast
    the new totals over the 'rating' channel group."""
    if request.method == 'POST':
        votes, status_code = update_remote_vote(
            obj_ct, obj_pk, request.data.get('value'), request.user.remote_id)
        # TODO: maybe implement in another place.
        async_to_sync(channel_layer.group_send)(
            'rating',
            {
                'type': 'votes',
                'content': votes,
            },
        )
    else:
        votes, status_code = get_remote_votes(obj_ct, obj_pk)
    return Response(votes, status=status_code)
|
khillion/ToolDog | tooldog/analyse/code_collector.py | Python | mit | 2,950 | 0.002712 | #!/usr/bin/env python3
import logging
import os
import urllib.parse
import urllib.request
import tarfile
from tooldog import TMP_DIR
from .utils import *
LOGGER = logging.getLogger(__name__)
class CodeCollector(object):
"""
Class to download source code from a https://bio.tools entry
"""
ZIP_NAME = "tool.zip"
TAR_NAME = "tool.tar"
TMP_NAME = "tmp"
def __init__(self, biotool):
"""
:param biotool: Biotool object
:type biotool: :class:`tooldog.biotool_model.Biotool`
"""
self.biotool = biotool
def _make_tar(self, file_path, tarname):
with tarfile.open(tarname, mode='w') as archive:
archive.add(file_path, arcname=self.ZIP_NAME)
def _get_from_repository(self, url):
"""
Get source code from a repository link
:param url: url of the repository
:type url: STRING
"""
# Here we deal with repository, have to use regex to test the url and
# use appropriate strategy to get the code depending the type of repository
if "github.com" in url:
return self._get_from_github(url)
else:
LOGGER.warn("The url ({}) is not a Github url".format(url))
LOGGER.warn("ToolDog only deals with Github repository for the moment...")
def _get_from_github(self, url):
try:
zip_url = os.path.join(url, "archive/master.zip")
response = urllib.request.urlopen(zip_url)
data = response.read()
LOGGER.info('Writing data to zip file...')
zip_path = os.path.join(TMP_DIR, self.ZIP_NAME)
tar_path = os.path.j | oin(TMP_DIR, self.TAR_NAME)
write_to_file(zip_pa | th, data, 'wb')
LOGGER.info('Making tar...')
self._make_tar(zip_path, tar_path)
return tar_path
except:
LOGGER.warn('Something went wrong with the following Github repository: {}'.format(zip_url))
def _get_from_source_code(self, url):
"""
Get source code from a source code link
:param url: url of the source code
:type url: STRING
"""
return None
def get_source(self):
"""
Retrieve source code of the tool using links provided in https://bio.tools
"""
source_code = None
links = self.biotool.informations.links
for link in links:
link_type = link.type.lower().translate(str.maketrans(' ', '_'))
try:
source_code = getattr(self, '_get_from_{}'.format(link_type))(link.url)
except AttributeError:
LOGGER.warn(link_type + ' link type is not processed yet by ToolDog.')
if source_code is not None:
# For the moment, consider that if a source code has been found,
# we just leave the loop.
break
return source_code
|
factorlibre/l10n-spain | l10n_es_partner/gen_src/__init__.py | Python | agpl-3.0 | 95 | 0 | # License AG | PL-3.0 or | later (https://www.gnu.org/licenses/agpl).
from . import gen_data_banks
|
ramusus/django-oauth-tokens | oauth_tokens/migrations/0006_auto__chg_field_accesstoken_access_token.py | Python | bsd-3-clause | 4,058 | 0.007886 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AccessToken.access_token'
db.alter_column(u'oauth_tokens_accesstoken', 'access_token', self.gf('django.db.models.fields.CharField')(max_length=500))
def backwards(self, orm):
# Changing field 'AccessToken.access_token'
db.alter_column(u'oauth_tokens_accesstoken', 'access_token', self.gf('django.db.models.fields.CharField')(max_length=200))
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oauth_tokens.accesstoken': | {
'Meta': {'ordering': "('-granted',)", 'object_name': 'AccessToken'},
'access_token': ('django.db.models.fields.CharFiel | d', [], {'max_length': '500'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'granted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'scope': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'token_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oauth_tokens.UserCredentials']", 'null': 'True', 'blank': 'True'})
},
u'oauth_tokens.usercredentials': {
'Meta': {'object_name': 'UserCredentials'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'additional': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['oauth_tokens'] |
VioletRed/script.module.urlresolver | lib/urlresolver/plugins/vidbull.py | Python | gpl-2.0 | 2,788 | 0.003945 | '''
Vidbull urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 o | f the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://w | ww.gnu.org/licenses/>.
'''
import re
import urllib2
import urllib
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36'
class VidbullResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "vidbull"
domains = [ "vidbull.com" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
try:
headers = {
'User-Agent': USER_AGENT
}
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url, headers=headers).content
match = re.search('<source\s+src="([^"]+)', html)
if match:
return match.group(1)
else:
raise Exception('File Link Not Found')
except urllib2.HTTPError as e:
common.addon.log_error(self.name + ': got http error %d fetching %s' % (e.code, web_url))
return self.unresolvable(code=3, msg=e)
except Exception as e:
common.addon.log('**** Vidbull Error occured: %s' % e)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.vidbull.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)',url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?vidbull.com/(?:embed-)?' +
'[0-9A-Za-z]+', url) or
'vidbull' in host)
|
scottbarstow/iris-python | iris_sdk/models/maps/roles_list.py | Python | mit | 119 | 0.016807 | #!/usr/bin/env python
from iris_sdk.mod | els.maps.base_map import BaseMap
class RolesListMap(BaseMap):
| role = None |
rouzazari/fuzzyflask | app/main/datatools.py | Python | mit | 388 | 0 | from | fuzzywuzzy import process
def dictionary_match(item, dictionary,
allow_low_match=False, low_match_threshold=90):
if item in dictionary:
return item, 100
matched_item, score = process.extractOne(item, dictionary)
if score < low_match_threshold and not allow_low_match:
return item, score
else:
return matched_item, sc | ore
|
googleinterns/schemaorg-generator | protogenerator/tests/test_utils.py | Python | apache-2.0 | 9,030 | 0.001329 | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, | either express or implied.
# See the License for the specific language governing permissions and
# limitations unde | r the License.
import utils.utils as utils
import rdflib
def test_to_snake_case():
"""Test utils.to_snake_case function.
Procedure:
- Create input and expected output for the following cases.
* Pascal Case.
* Pascal Case with multiple upper case characters at the start.
* Camel Case.
* Camel Case with multiple upper case characters in the middle.
- Call utils.to_snake_case for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
ip1 = 'NameName'
op1 = 'name_name'
ip2 = 'NAMEName'
op2 = 'name_name'
ip3 = 'nameName'
op3 = 'name_name'
ip4 = 'nameABCName'
op4 = 'name_abc_name'
assert utils.to_snake_case(ip1) == op1, 'Test pascal case.'
assert utils.to_snake_case(
ip2) == op2, 'Test pascal case with multiple upper case at start.'
assert utils.to_snake_case(ip3) == op3, 'Test camel case.'
assert utils.to_snake_case(
ip4) == op4, 'Test camel case with multiple upper case at middle.'
def test_get_property_name():
"""Test utils.get_property_name function.
Procedure:
- Create input and expected output for the following cases.
* Lower Case.
* Pascal Case.
* Camel Case.
- Call utils.get_property_name for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
ip1 = 'name'
op1 = 'NameProperty'
ip2 = 'Name'
op2 = 'NameProperty'
ip3 = 'nameProperty'
op3 = 'NamePropertyProperty'
assert utils.get_property_name(ip1) == op1, 'Test lower case.'
assert utils.get_property_name(ip2) == op2, 'Test pascal case.'
assert utils.get_property_name(ip3) == op3, 'Test camel case.'
def test_get_enum_value_name():
"""Test utils.get_enum_value_name function.
Procedure:
- Create input and expected output for the following cases.
* Camel Case.
* Pascal Case.
* Pascal Case with multiple upper case characters at the start.
* Pascal Case with multiple upper case characters at the middle.
- Call utils.get_enum_value_name for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
ip1 = 'enumValue'
op1 = 'ENUM_VALUE'
ip2 = 'EnumValue'
op2 = 'ENUM_VALUE'
ip3 = 'ENUMValue'
op3 = 'ENUM_VALUE'
ip4 = 'EnumABCValue'
op4 = 'ENUM_ABC_VALUE'
assert utils.get_enum_value_name(ip1) == op1, 'Test camel case.'
assert utils.get_enum_value_name(ip2) == op2, 'Test pascal case.'
assert utils.get_enum_value_name(
ip3) == op3, 'Test pascal case with multiple upper case at start.'
assert utils.get_enum_value_name(
ip4) == op4, 'Test pascal case with multiple upper case at middle.'
def test_strip_url():
"""Test utils.strip_url function.
Procedure:
- Create input and expected output for the following cases.
* Normal URL.
* Non URL.
* URL without the protocol prefix.
* URL that points to subroute.
- Call utils.strip_url for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
ip1 = 'http://abc.com/entity'
op1 = 'entity'
ip2 = 'entity'
op2 = 'entity'
ip3 = 'abc.com/entity'
op3 = 'entity'
ip4 = 'http://abc.com/sub/entity'
op4 = 'entity'
assert utils.strip_url(ip1) == op1, 'Test a normal URL.'
assert utils.strip_url(ip2) == op2, 'Test a non URL.'
assert utils.strip_url(ip3) == op3, 'Test a URL without protocol.'
assert utils.strip_url(ip4) == op4, 'Test a nested URL.'
def test_get_class_type():
"""Test utils.get_class_type function.
Procedure:
- Create a list of defined classes.
- Create input and expected output for the following cases.
* Defined Class.
* Undefined Class.
* Dattypes.
- Call utils.get_class_type for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
class_list = {'randomClass', 'RandomClass'}
ip1 = 'RandomClass'
op1 = 'RandomClass'
ip2 = 'randomClass'
op2 = 'randomClass'
ip3 = 'randomclass'
op3 = 'string'
ip4 = 'Text'
op4 = 'string'
ip5 = 'Number'
op5 = 'double'
ip6 = 'Float'
op6 = 'double'
ip7 = 'Integer'
op7 = 'int64'
ip8 = 'URL'
op8 = 'string'
ip9 = 'Boolean'
op9 = 'bool'
assert utils.get_class_type(
ip1, class_list) == op1, 'Test if for a defined class the class name is returned.'
assert utils.get_class_type(
ip2, class_list) == op2, 'Test if for a defined class the class name is returned.'
assert utils.get_class_type(
ip3, class_list) == op3, 'Test if for an undefined class the string is returned.'
assert utils.get_class_type(
ip4, class_list) == op4, 'Test if Text is mapped to string.'
assert utils.get_class_type(
ip5, class_list) == op5, 'Test if Number is mapped to double.'
assert utils.get_class_type(
ip6, class_list) == op6, 'Test if Float is mapped to double.'
assert utils.get_class_type(
ip7, class_list) == op7, 'Test if Integer is mapped to int64.'
assert utils.get_class_type(
ip8, class_list) == op8, 'Test if URL is mapped to string.'
assert utils.get_class_type(
ip9, class_list) == op9, 'Test if Boolean is mapped to bool.'
def test_add_url():
"""Test utils.add_url function.
Procedure:
- Create a couple of inputs and expected outputs.
- Call utils.add_url for all the inputs.
Verification:
- Check if all the outputs generated by the function is equal to
corresponding expected output.
"""
ip1 = 'abc'
op1 = rdflib.URIRef('http://schema.org/abc')
ip2 = 'Abc'
op2 = rdflib.URIRef('http://schema.org/Abc')
assert utils.add_url(ip1) == op1, 'Test string'
assert utils.add_url(ip2) == op2, 'Test string'
def test_topological_sort():
"""Test utils.toplogical_sort function.
Procedure:
- Create a dict.
- Populate the dict like with adjacency list representation of every
node.
- Create an expected output of topological ordering.
- Call utils.toplogical_sort for the graph.
Verification:
- Check if all the output generated by the function is the same order
are expected output.
"""
graph = dict()
for i in range(6):
graph[i] = set()
graph[2].add(3)
graph[3].add(1)
graph[4].add(0)
graph[4].add(1)
graph[5].add(0)
graph[5].add(2)
answer = [5, 4, 2, 3, 1, 0]
assert utils.topological_sort(graph) == answer, 'Test toplogical_sort.'
def test_get_children():
"""Test utils.get_childern function.
Procedure:
- Create a graph dict.
- Populate the graph dict like with adjacency list representation of
every node.
- The final graph shud be of tree structure and must be multiple levels
deep.
- Create another answer dict.
- Populate answer dict with all the children of every node.
- |
pcmagic/stokes_flow | ecoli_in_pipe/ecoliInPipe_singletail.py | Python | mit | 4,799 | 0.003334 | # coding=utf-8
# 1. generate velocity and force nodes of sphere using MATLAB,
# 2. for each force node, get b, solve surrounding velocity boundary condition (pipe and cover, named boundary velocity) using formula from Liron's paper, save .mat file
# 3. read .mat file, for each boundary velocity, solve associated boundary force.
# 4. solve sphere M matrix using boundary force.
# 5. solve problem and check.
import sys
import petsc4py
petsc4py.init(sys.argv)
# from time import time
# from scipy.io import loadmat
# from src.stokes_flow import problem_dic, obj_dic
# from src.geo import *
from petsc4py import PETSc
from src import stokes_flow as sf
from src.myio import *
from src.objComposite import *
from src.myvtk import save_singleEcoli_vtk
from codeStore.ecoli_common import *
# def get_problem_kwargs(**main_kwargs):
# problem_kwargs = get_solver_kwargs()
# OptDB = PETSc.Options()
# fileHandle = OptDB.``('f', 'ecoliInPipe')
# OptDB.setValue('f', fileHandle)
# problem_kwargs['fileHandle'] = fileHandle
#
# kwargs_list = (get_vtk_tetra_kwargs(), get_ecoli_kwargs(), get_forcefree_kwargs(), main_kwargs,)
# for t_kwargs in kwargs_list:
# for key in t_kwargs:
# problem_kwargs[key] = t_kwargs[key]
# return problem_kwargs
#
#
# def print_case_info(**problem_kwargs):
# fileHandle = problem_kwargs['fileHandle']
# PETSc.Sys.Print('-->Ecoli in pipe case, force free case.')
# print_solver_info(**problem_kwargs)
# print_forcefree_info(**problem_kwargs)
# print_ecoli_info(fileHandle, **problem_kwargs)
# return True
# @profile
def main_fun(**main_kwargs):
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'ecoliInPipe')
OptDB.setValue('f', fileHandle)
main_kwargs['fileHandle'] = fileHandle
problem_kwargs = get_problem_kwargs(**main_kwargs)
if not problem_kwargs['restart']:
forcepipe = problem_kwargs['forcepipe']
print_case_info(**problem_kwargs)
ecoliHeadType = OptDB.getString('ecoliHeadType', 'tunnel')
if 'ellipse' in ecoliHeadType:
ecoli_comp0 = createEcoliComp_ellipse(name='ecoli_0', **problem_kwargs)
elif 'tunnel' in ecoliHeadType:
ecoli_comp0 = createEcoliComp_tunnel(name='ecoli_0', **problem_kwargs)
else:
err_msg = 'wrong ecoliHeadType | '
raise ValueError(err_msg)
ecoli_comp = sf.ForceFreeComposite(center=ecoli_comp0.get_center(), name='ecoli_0')
ecoli_comp.add_obj(ecoli_comp0.get_obj_list()[0], rel_U=ecoli_comp0.get_rel_U_list()[0])
ecoli_comp.add_obj(ecoli_comp0.get_obj_list()[1], rel_U=ecoli_comp0.get_rel_U_list()[1])
iterateTolerate = OptDB.getReal('iterateTolerat | e', 1e-4)
problem = sf.StokesletsInPipeforcefreeIterateProblem(**problem_kwargs)
problem.set_prepare(forcepipe)
problem.add_obj(ecoli_comp)
if problem_kwargs['pickProblem']:
problem.pickmyself(fileHandle, ifcheck=True)
problem.set_iterate_comp(ecoli_comp)
problem.print_info()
problem.create_matrix()
refU, Ftol, Ttol = problem.do_iterate(tolerate=iterateTolerate)
ecoli_comp.set_ref_U(refU)
PETSc.Sys.Print('---->>>reference velocity is', refU)
# post process
head_U, tail_U = print_single_ecoli_forcefree_result(ecoli_comp, **problem_kwargs)
ecoli_U = ecoli_comp.get_ref_U()
save_singleEcoli_vtk(problem, createHandle=createEcoliComp_tunnel)
else:
head_U, tail_U, ecoli_U = ecoli_restart(**main_kwargs)
return head_U, tail_U, ecoli_U
# t_name = check_file_extension(fileHandle, '_pick.bin')
# with open(t_name, 'rb') as myinput:
# unpick = pickle.Unpickler(myinput)
# problem = unpick.load()
# problem.unpick_myself()
# ecoli_comp = problem.get_obj_list()[0]
#
# problem_kwargs = problem.get_kwargs()
# problem_kwargs1 = get_problem_kwargs(**main_kwargs)
# problem_kwargs['matname'] = problem_kwargs1['matname']
# problem_kwargs['bnodesHeadle'] = problem_kwargs1['bnodesHeadle']
# problem_kwargs['belemsHeadle'] = problem_kwargs1['belemsHeadle']
# problem_kwargs['ffweightx'] = problem_kwargs1['ffweightx']
# problem_kwargs['ffweighty'] = problem_kwargs1['ffweighty']
# problem_kwargs['ffweightz'] = problem_kwargs1['ffweightz']
# problem_kwargs['ffweightT'] = problem_kwargs1['ffweightT']
# # PETSc.Sys.Print([attr for attr in dir(problem) if not attr.startswith('__')])
# # PETSc.Sys.Print(problem_kwargs1['ffweightT'])
#
# problem.set_kwargs(**problem_kwargs)
# print_case_info(**problem_kwargs)
# problem.print_info()
# problem.set_force_free()
# problem.solve()
if __name__ == '__main__':
main_fun()
|
jasonzou/MyPapers | bibserver/search.py | Python | mit | 19,340 | 0.012048 | """
search.py
"""
from flask import Flask, request, redirect, abort, make_response
from flask import render_template, flash
import bibserver.dao
from bibserver import auth
import json, httplib
from bibserver.config import config
import bibserver.util as util
import logging
from logging.handlers import RotatingFileHandler
LOG_FILENAME="./app.log"
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=5)
handler.setFormatter(formatter)
log.addHandler(handler)
class Search(object):
def __init__(self,path,current_user):
self.path = path.replace(".json","")
self.current_user = current_user
# facets -> convert to aggs
self.search_options = {
'search_url': '/query?',
'search_index': 'elasticsearch',
'paging': { 'from': 0, 'size': 10 },
#'predefined_filters': {},
#'facets': config['search_facet_fields'],
'result_display': config['search_result_display'],
'search_sortby': [{'display':'year', 'field':'year.exact'},
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}],
'searchbox_fieldselect': [
{'display':'author','field':'author.name'},
{'display':'journal','field':'journal.name'}]#,
#'addremovefacets': config['add_remove_facets'] # (full list could also be pulled from DAO)
}
self.parts = self.path.strip('/').split('/')
def find(self):
log.debug(self.parts[0])
log.debug(self.parts)
log.debug(len(self.parts))
if bibserver.dao.Account.get(self.parts[0]):
if len(self.parts) == 1:
return self.account() # user account
elif len(self.parts) == 2:
if self.parts[1] == "collections":
return self.collections()
else:
return self.collection() # get a collection
elif len(self.parts) == 3:
return self.record() # get a record in collection
elif self.parts[0] == 'collections':
return self.collections() # get search list of all collections
elif len(self.parts) == 1:
if self.parts[0] != 'search':
self.search_options['q'] = self.parts[0]
return self.default() # get search result of implicit search term
elif len(self.parts) == 2:
return self.implicit_facet() # get search result of implicit facet filter
else:
abort(404)
def default(self):
# default search page
if util.request_wants_json():
res = bibserver.dao.Record.query()
resp = make_response(
json.dumps([i['_source'] for i in res._hits],
sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None
)
# TODO: convert facet => aggs
def implicit_facet(self):
self.search_options['predefined_filters'][self.parts[0]+config['facet_field']] = self.parts[1]
# remove the implicit facet from facets
for count,facet in enumerate(self.search_options['facets']):
if facet['field'] == self.parts[0]+config['facet_field']:
del self.search_options['facets'][count]
if util.request_wants_json():
res = bibserver.dao.Record.query(terms=self.search_options['predefined_filters'])
resp = make_response( json.dumps([i['_source'] for i in res._hits], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
return render_template('search/index.html',
current_user=self.current_user,
search_options=json.dumps(self.search_options),
collection=None,
implicit=self.parts[0]+': ' + self.parts[1]
)
def collections(self):
if len(self.parts) == 1:
if util.request_wants_json():
res = bibserver.dao.Collection.query(size=1000000)
colls = [i['_source'] for i in res._hits]
resp = make_response( json.dumps(colls, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
# search collection records
self.search_options['search_url'] = '/query/collection?'
self.search_options['facets'] = [{'field':'owner','size':100},{'field':'_created','size':100}]
self.search_options['result_display'] = [[{'pre':'<h3>','field':'label','post':'</h3>'}],[{'field':'description'}],[{'pre':'created by ','field':'owner'}]]
self.search_options['result_display'] = config['collections_result_display']
return render_template('collection/index.html', current_user=self.current_user, search_options=json.dumps(self.search_options), collection=None)
elif len(self.parts) == 2:
if self.parts[0] == "collections":
acc = bibserver.dao.Account.get(self.parts[1])
else:
acc = bibserver.dao.Account.get(self.parts[0])
if acc:
resp = make_response( json.dumps([coll.data for coll in acc.collections], sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
elif len(self.parts) == 3:
coll = bibserver.dao.Collection.get_by_owner_coll(self.parts[1],self.parts[2])
if coll:
coll.data['records'] = len(coll)
resp = make_response( json.dumps(coll.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
abort(404)
else:
abort(404)
def record(self):
found = None
res = bibserver.dao.Record.query(terms = {
'owner'+config['facet_field']:self.parts[0],
'collection'+config['facet_field']:self.parts[1],
'id'+config['facet_field']:self.parts[2]
})
if res.total == 0:
rec = bibserver.dao.Record.get(self.parts[2])
if rec: found = 1
elif res.total == 1:
rec = bibserver.dao.Re | cord.get(res._hits[0]['_id'])
found = 1
else:
found = 2
if not found:
abort(404)
elif found == 1:
collection = bibserver.dao.Collection.get_by_owner_coll(rec.data['owner'],rec.data['collection'])
if request.method == 'DELETE':
if r | ec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.delete()
abort(404)
else:
abort(404)
elif request.method == 'POST':
if rec:
if not auth.collection.update(self.current_user, collection):
abort(401)
rec.data = request.json
rec.save()
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
if util.request_wants_json():
resp = make_response( json.dumps(rec.data, sort_keys=True, indent=4) )
resp.mimetype = "application/json"
return resp
else:
admin = True if aut |
vd4mmind/MACS | MACS2/OptValidator.py | Python | bsd-3-clause | 26,830 | 0.010175 | # Time-stamp: <2015-05-19 13:42:30 Tao Liu>
"""Module Description
Copyright (c) 2010,2011 Tao Liu <taoliu@jimmy.harvard.edu>
This code is free software; you can re | distribute it and/or modify it
under the terms of the BSD License (see the file | COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: Tao Liu
@contact: taoliu@jimmy.harvard.edu
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import re
import logging
from argparse import ArgumentError
from subprocess import Popen, PIPE
from math import log
from MACS2.IO.Parser import BEDParser, ELANDResultParser, ELANDMultiParser, \
ELANDExportParser, SAMParser, BAMParser, \
BAMPEParser, BowtieParser, guess_parser
# ------------------------------------
# constants
# ------------------------------------
efgsize = {"hs":2.7e9,
"mm":1.87e9,
"ce":9e7,
"dm":1.2e8}
# ------------------------------------
# Misc functions
# ------------------------------------
def opt_validate ( options ):
"""Validate options from a OptParser object.
Ret: Validated options object.
"""
# gsize
try:
options.gsize = efgsize[options.gsize]
except:
try:
options.gsize = float(options.gsize)
except:
logging.error("Error when interpreting --gsize option: %s" % options.gsize)
logging.error("Available shortcuts of effective genome sizes are %s" % ",".join(efgsize.keys()))
sys.exit(1)
# format
options.gzip_flag = False # if the input is gzip file
options.format = options.format.upper()
if options.format == "ELAND":
options.parser = ELANDResultParser
elif options.format == "BED":
options.parser = BEDParser
elif options.format == "ELANDMULTI":
options.parser = ELANDMultiParser
elif options.format == "ELANDEXPORT":
options.parser = ELANDExportParser
elif options.format == "SAM":
options.parser = SAMParser
elif options.format == "BAM":
options.parser = BAMParser
options.gzip_flag = True
elif options.format == "BAMPE":
options.parser = BAMPEParser
options.gzip_flag = True
options.nomodel = True
elif options.format == "BOWTIE":
options.parser = BowtieParser
elif options.format == "AUTO":
options.parser = guess_parser
else:
logging.error("Format \"%s\" cannot be recognized!" % (options.format))
sys.exit(1)
# duplicate reads
if options.keepduplicates != "auto" and options.keepduplicates != "all":
if not options.keepduplicates.isdigit():
logging.error("--keep-dup should be 'auto', 'all' or an integer!")
sys.exit(1)
# shiftsize>0
#if options.shiftsize: # only if --shiftsize is set, it's true
# options.extsize = 2 * options.shiftsize
#else: # if --shiftsize is not set
# options.shiftsize = options.extsize / 2
if options.extsize < 1 :
logging.error("--extsize must >= 1!")
sys.exit(1)
# refine_peaks, call_summits can't be combined with --broad
#if options.broad and (options.refine_peaks or options.call_summits):
# logging.error("--broad can't be combined with --refine-peaks or --call-summits!")
# sys.exit(1)
if options.broad and options.call_summits:
logging.error("--broad can't be combined with --call-summits!")
sys.exit(1)
if options.pvalue:
# if set, ignore qvalue cutoff
options.log_qvalue = None
options.log_pvalue = log(options.pvalue,10)*-1
else:
options.log_qvalue = log(options.qvalue,10)*-1
options.log_pvalue = None
if options.broad:
options.log_broadcutoff = log(options.broadcutoff,10)*-1
# uppercase the format string
options.format = options.format.upper()
# upper and lower mfold
options.lmfold = options.mfold[0]
options.umfold = options.mfold[1]
if options.lmfold > options.umfold:
logging.error("Upper limit of mfold should be greater than lower limit!" % options.mfold)
sys.exit(1)
# output filenames
options.peakxls = os.path.join( options.outdir, options.name+"_peaks.xls" )
options.peakbed = os.path.join( options.outdir, options.name+"_peaks.bed" )
options.peakNarrowPeak = os.path.join( options.outdir, options.name+"_peaks.narrowPeak" )
options.peakBroadPeak = os.path.join( options.outdir, options.name+"_peaks.broadPeak" )
options.peakGappedPeak = os.path.join( options.outdir, options.name+"_peaks.gappedPeak" )
options.summitbed = os.path.join( options.outdir, options.name+"_summits.bed" )
options.bdg_treat = os.path.join( options.outdir, options.name+"_treat_pileup.bdg" )
options.bdg_control= os.path.join( options.outdir, options.name+"_control_lambda.bdg" )
if options.cutoff_analysis:
options.cutoff_analysis_file = os.path.join( options.outdir, options.name+"_cutoff_analysis.txt" )
else:
options.cutoff_analysis_file = None
#options.negxls = os.path.join( options.name+"_negative_peaks.xls" )
#options.diagxls = os.path.join( options.name+"_diag.xls" )
options.modelR = os.path.join( options.outdir, options.name+"_model.r" )
#options.pqtable = os.path.join( options.outdir, options.name+"_pq_table.txt" )
# logging object
logging.basicConfig(level=(4-options.verbose)*10,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
options.error = logging.critical # function alias
options.warn = logging.warning
options.debug = logging.debug
options.info = logging.info
options.argtxt = "\n".join((
"# Command line: %s" % " ".join(sys.argv[1:]),\
"# ARGUMENTS LIST:",\
"# name = %s" % (options.name),\
"# format = %s" % (options.format),\
"# ChIP-seq file = %s" % (options.tfile),\
"# control file = %s" % (options.cfile),\
"# effective genome size = %.2e" % (options.gsize),\
#"# tag size = %d" % (options.tsize),\
"# band width = %d" % (options.bw),\
"# model fold = %s\n" % (options.mfold),\
))
if options.pvalue:
if options.broad:
options.argtxt += "# pvalue cutoff for narrow/strong regions = %.2e\n" % (options.pvalue)
options.argtxt += "# pvalue cutoff for broad/weak regions = %.2e\n" % (options.broadcutoff)
options.argtxt += "# qvalue will not be calculated and reported as -1 in the final output.\n"
else:
options.argtxt += "# pvalue cutoff = %.2e\n" % (options.pvalue)
options.argtxt += "# qvalue will not be calculated and reported as -1 in the final output.\n"
else:
if options.broad:
options.argtxt += "# qvalue cutoff for narrow/strong regions = %.2e\n" % (options.qvalue)
options.argtxt += "# qvalue cutoff for broad/weak regions = %.2e\n" % (options.broadcutoff)
else:
options.argtxt += "# qvalue cutoff = %.2e\n" % (options.qvalue)
if options.downsample:
options.argtxt += "# Larger dataset will be randomly sampled towards smaller dataset.\n"
if options.seed >= 0:
options.argtxt += "# Random seed has been set as: %d\n" % options.seed
else:
if options.tolarge:
options.argtxt += "# Smaller dataset will be scaled towards larger dataset.\n"
else:
options.argtxt += "# Larger dataset will be scaled towards smaller dataset.\n"
if options.ratio != 1.0:
options.argtxt += "# Using a custom scaling factor: %.2e\n" % (options.ratio)
if options.cfile:
options.argtxt += "# Range for calculating regional lambda is: %d bps and %d bps\n" % (options.smalllocal,options.largelocal)
else:
option |
ftrautsch/testEvolution | resultprocessor/coveragemodels.py | Python | apache-2.0 | 1,211 | 0.002477 | from mongoengine import Document, StringField, DateTimeField, ListField, DateTimeField, IntField, BooleanField, \
ObjectIdFi | eld, FloatField
class Covelement(Document):
instructionsCov = IntField()
instructionsMis = IntField()
branchesCo | v = IntField()
branchesMis = IntField()
lineCov = IntField()
lineMis = IntField()
complexityCov = IntField()
complexityMis = IntField()
methodCov = IntField()
methodMis = IntField()
class Covproject(Covelement):
    """Project-level coverage totals: Covelement counters plus class counts."""
    # *Cov/*Mis presumably mean covered/missed counts (mirrors the naming
    # of the inherited fields) -- confirm against the coverage importer.
    classCov = IntField()
    classMis = IntField()
class Covpackage(Covelement):
    """Per-package coverage record: Covelement counters, class counts and a name."""
    classCov = IntField()
    classMis = IntField()
    name = StringField(required=True)  # package identifier
class CovClass(Covelement):
    """Per-class coverage record: Covelement counters, nested-class counts and a name."""
    classCov = IntField()
    classMis = IntField()
    name = StringField(required=True)  # class identifier
class CovMethod(Covelement):
    """Per-method coverage record."""
    name = StringField(required=True)  # method name
    desc = StringField(required=True)  # presumably the JVM-style method descriptor -- confirm
    line = IntField()                  # source line of the method
class CovSourcefile(Covelement):
    """Per-source-file coverage record: Covelement counters, class counts and a name."""
    classCov = IntField()
    classMis = IntField()
    name = StringField(required=True)  # source file name
class CovLine():
    """Per-line coverage counters.

    NOTE(review): unlike the classes above, CovLine inherits from neither
    Document nor Covelement, so these mongoengine Field objects are plain
    class attributes and will not be validated or persisted as documents.
    Confirm whether this is intentional.
    """
    number = IntField()           # line number within the source file
    branchesCov = IntField()
    branchesMis = IntField()
    instructionsCov = IntField()
    instructionsMis = IntField()
|
lgarren/spack | var/spack/repos/builtin/packages/r-annotationdbi/package.py | Python | lgpl-2.1 | 2,016 | 0.000992 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
#
from spack import *
c | lass RAnnotationdbi(RPackage):
"""Provides user interface and database connection code for
annotation data packages using SQLite data storage."""
homepage = "https://www.bioconductor.org/packages/AnnotationDbi/"
url = "https://git.bioconductor.org/packages/Annotati | onDbi"
list_url = homepage
version('1.38.2', git='https://git.bioconductor.org/packages/AnnotationDbi', commit='67d46facba8c15fa5f0eb47c4e39b53dbdc67c36')
depends_on('r@3.4.0:3.4.9', when='@1.38.2')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-dbi', type=('build', 'run'))
depends_on('r-rsqlite', type=('build', 'run'))
|
CityOfNewYork/NYCOpenRecords | app/upload/constants/upload_status.py | Python | apache-2.0 | 80 | 0 | PROCESSING = "processing"
SCANNING = "scanning" |
READY = "ready"
ERROR = "e | rror"
|
mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/tasks/mt/model.py | Python | apache-2.0 | 16,381 | 0.006105 | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MT models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_layer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import insertion
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import metrics
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ml_perf_bleu_metric
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks.mt import decoder
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks.mt import encoder
from six.moves import range
class MTBaseModel(base_model.BaseTask):
"""Base Class for NMT models."""
def _EncoderDevice(self):
    """Returns the device to run the encoder computation."""
    # Empty device string: no explicit placement is requested here.
    return tf.device('')
def _DecoderDevice(self):
    """Returns the device to run the decoder computation."""
    # Empty device string: no explicit placement is requested here.
    return tf.device('')
@base_layer.initializer
def __init__(self, params):
    """Creates the child layers: optional encoder 'enc' and decoder 'dec'.

    The encoder is only constructed when p.encoder is set; each child is
    built under its configured device context.
    """
    super(MTBaseModel, self).__init__(params)
    p = self.params
    with tf.variable_scope(p.name):
        with self._EncoderDevice():
            if p.encoder:
                self.CreateChild('enc', p.encoder)
        with self._DecoderDevice():
            self.CreateChild('dec', p.decoder)
def ComputePredictions(self, theta, batch):
    """Encodes batch.src (if an encoder exists) and runs the decoder forward.

    Returns whatever the decoder's ComputePredictions yields; when that is
    a NestedMap, the encoder outputs are attached under 'encoder_outputs'.
    """
    p = self.params
    with self._EncoderDevice():
        # No encoder configured -> decoder receives None encoder outputs.
        encoder_outputs = (
            self.enc.FProp(theta.enc, batch.src) if p.encoder else None)
    with self._DecoderDevice():
        predictions = self.dec.ComputePredictions(theta.dec, encoder_outputs,
                                                  batch.tgt)
    if isinstance(predictions, py_utils.NestedMap):
        # Pass through encoder output as well for possible use as a FProp output
        # for various meta-MT modeling approaches, such as MT quality estimation
        # classification.
        predictions['encoder_outputs'] = encoder_outputs
    return predictions
def ComputeLoss(self, theta, predictions, input_batch):
    """Delegates loss computation to the decoder, on the decoder device."""
    with self._DecoderDevice():
        return self.dec.ComputeLoss(theta.dec, predictions, input_batch.tgt)
def _GetTokenizerKeyToUse(self, key):
    """Returns `key` when the input generator has a tokenizer for it, else None."""
    return key if key in self.input_generator.tokenizer_dict else None
def _BeamSearchDecode(self, input_batch):
p = self.params
with tf.name_scope('fprop'), tf.name_scope(p.name):
encoder_outputs = self.enc.FPropDefaultTheta(input_batch.src)
encoder_outputs = self.dec.AddExtraDecodingInfo(encoder_outputs,
input_batch.tgt)
decoder_outs = self.dec.BeamSearchDecode(encoder_outputs)
return self._ProcessBeamSearchDecodeOut(in | put_batch, encoder_outputs,
decoder_outs)
def _ProcessBeamSearchDecodeOut(self, input_batch, encoder_outputs,
decoder_outs):
self.r1_shape = decoder_outs[0]
self.r2_shape = decoder_outs[1]
self.r3_shape = decoder_outs[2]
tf.logging.info('r1_shape: %s', self.r1_shape)
tf.logging.info('r2_shape: %s', self.r2_shape)
tf.logging.info('r3_shape: %s', self.r3_shape) |
hyps = decoder_outs[3]
prev_hyps = decoder_outs[4]
done_hyps = decoder_outs[5]
scores = decoder_outs[6]
atten_probs = decoder_outs[7]
eos_scores = decoder_outs[8]
eos_atten_probs = decoder_outs[9]
source_seq_lengths = decoder_outs[10]
tlen = tf.cast(
tf.round(tf.reduce_sum(1.0 - input_batch.tgt.paddings, 1) - 1.0),
tf.int32)
ret_dict = {
'target_ids': input_batch.tgt.ids[:, 1:],
'eval_weight': input_batch.eval_weight,
'tlen': tlen,
'hyps': hyps,
'prev_hyps': prev_hyps,
'done_hyps': done_hyps,
'scores': scores,
'atten_probs': atten_probs,
'eos_scores': eos_scores,
'eos_atten_probs': eos_atten_probs,
'source_seq_lengths': source_seq_lengths,
}
return ret_dict
def PostProcessDecodeHost(self, metrics_dict):
    """Host-side post-processing of raw beam-search decode tensors.

    Feeds the raw beam-search buffers (plus the r*_shape values captured in
    _ProcessBeamSearchDecodeOut) through the decoder's
    BeamSearchDecodePostProcess, and returns a dict with the references
    ('target_ids', 'tlen', 'eval_weight') and the selected hypotheses
    ('top_ids', 'top_lens').
    """
    p = self.params
    ret_dict = {
        'target_ids': metrics_dict['target_ids'],
        'eval_weight': metrics_dict['eval_weight'],
        'tlen': metrics_dict['tlen'],
    }
    # Shapes recorded during the in-graph decode step.
    r1_shape = self.r1_shape
    r2_shape = self.r2_shape
    r3_shape = self.r3_shape
    ret_ids = self.dec.BeamSearchDecodePostProcess(
        p.decoder.beam_search.num_hyps_per_beam,
        p.decoder.target_seq_len,
        r1_shape,
        r2_shape,
        r3_shape,
        metrics_dict['hyps'],
        metrics_dict['prev_hyps'],
        metrics_dict['done_hyps'],
        metrics_dict['scores'],
        metrics_dict['atten_probs'],
        metrics_dict['eos_scores'],
        metrics_dict['eos_atten_probs'],
        metrics_dict['source_seq_lengths'],
        [])
    (final_done_hyps, topk_hyps, topk_ids, topk_lens,
     topk_scores) = ret_ids[0:5]
    # Stride by num_hyps_per_beam: keeps one hypothesis per beam
    # (presumably the top-ranked one -- confirm the ordering contract of
    # BeamSearchDecodePostProcess).
    ret_dict['top_ids'] = topk_ids[::p.decoder.beam_search.num_hyps_per_beam]
    ret_dict['top_lens'] = topk_lens[::p.decoder.beam_search.num_hyps_per_beam]
    return ret_dict
def _PostProcessBeamSearchDecodeOut(self, dec_out_dict, dec_metrics_dict):
    """Post processes the output from `_BeamSearchDecode`.

    Converts reference and hypothesis id sequences back to strings and
    updates the BLEU / sample-count metrics, weighting each example by
    its eval_weight.  Returns an empty list (no per-example outputs).
    """
    tgt_ids = dec_out_dict['target_ids']
    tlen = dec_out_dict['tlen']
    top_ids = dec_out_dict['top_ids']
    top_lens = dec_out_dict['top_lens']
    eval_weight = dec_out_dict['eval_weight']
    total_eval_weight = 0.0
    targets = self.input_generator.PythonIdsToStrings(tgt_ids, tlen)
    num_samples = len(targets)
    tf.logging.info('num_samples: %d', num_samples)
    # top_lens - 1: presumably drops a trailing terminator token -- confirm.
    top_decoded = self.input_generator.PythonIdsToStrings(top_ids, top_lens - 1)
    assert num_samples == len(top_decoded), ('%s vs %s' %
                                             (num_samples, len(top_decoded)))
    dec_metrics_dict['num_samples_in_batch'].Update(num_samples)
    for i in range(num_samples):
        tgt = targets[i]
        top_hyp = top_decoded[i]
        example_eval_weight = eval_weight[i]
        total_eval_weight += example_eval_weight
        dec_metrics_dict['ml_perf_bleu'].Update(tgt, top_hyp, example_eval_weight)
    tf.logging.info('total_eval_weight: %f', total_eval_weight)
    return []
def CreateDecoderMetrics(self):
    """Builds the metric objects updated during decode post-processing."""
    return {
        'num_samples_in_batch': metrics.AverageMetric(),
        'ml_perf_bleu': ml_perf_bleu_metric.MlPerfBleuMetric(),
    }
def Decode(self, input_batch):
    """Constructs the decoding graph (beam search over the encoder outputs)."""
    return self._BeamSearchDecode(input_batch)
def PostProcessDecodeOut(self, dec_out, dec_metrics):
    """Updates `dec_metrics` from one batch of decode output (runs on host)."""
    return self._PostProcessBeamSearchDecodeOut(dec_out, dec_metrics)
class TransformerModel(MTBaseModel):
"""Transformer Model.
Implements Attention is All You Need:
https://arxiv.org/abs/1706.03762
"""
@classmethod
def Params(cls):
p = super(TransformerModel, cls).Params()
p.encoder = encoder.TransformerEncoder.Params()
p.decoder = decoder.Tr |
peterhinch/micropython-async | v3/as_drivers/as_GPS/as_rwGPS.py | Python | mit | 4,383 | 0.003194 | # as_rwGPS.py Asynchronous device driver for GPS devices using a UART.
# Supports a limited subset of the PMTK command packets employed by the
# widely used MTK3329/MTK3339 chip.
# Sentence parsing based on MicropyGPS by Michael Calvin McCoy
# https://github.com/inmcm/micropyGPS
# Copyright (c) 2018 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import as_drivers.as_GPS as as_GPS
try:
from micropython import const
except ImportError:
const = lambda x : x
HOT_START = const(1)
WARM_START = const(2)
COLD_START = const(3)
FULL_COLD_START = const(4)
STANDBY = const(5)
DEFAULT_SENTENCES = const(6)
VERSION = const(7)
ENABLE = const(8)
ANTENNA = const(9)
NO_ANTENNA = const(10)
# Return CRC of a bytearray.
def _crc(sentence):
x = 1
crc = 0
while sentence[x] != ord('*'):
crc ^= sentence[x]
x += 1
return crc # integer
class GPS(as_GPS.AS_GPS):
# Canned PMTK/PGCMD sentences keyed by the module-level command constants.
# Checksums are pre-computed, so these are transmitted verbatim by command().
fixed_commands = {HOT_START: b'$PMTK101*32\r\n',
                  WARM_START: b'$PMTK102*31\r\n',
                  COLD_START: b'$PMTK103*30\r\n',
                  FULL_COLD_START: b'$PMTK104*37\r\n',
                  STANDBY: b'$PMTK161,0*28\r\n',
                  DEFAULT_SENTENCES: b'$PMTK314,-1*04\r\n',
                  VERSION: b'$PMTK605*31\r\n',
                  ENABLE: b'$PMTK414*33\r\n',
                  ANTENNA: b'$PGCMD,33,1*6C',
                  NO_ANTENNA: b'$PGCMD,33,0*6D',
                  }
def __init__(self, sreader, swriter, local_offset=0,
             fix_cb=lambda *_ : None, cb_mask=as_GPS.RMC, fix_cb_args=(),
             msg_cb=lambda *_ : None, msg_cb_args=()):
    """Read/write GPS driver.

    sreader/swriter are the async UART stream reader and writer; the
    remaining args are forwarded to the read-only base class.  msg_cb is
    fired (with msg_cb_args) when a proprietary response is parsed.
    """
    super().__init__(sreader, local_offset, fix_cb, cb_mask, fix_cb_args)
    self._swriter = swriter
    self.version = None  # Response to VERSION query
    self.enabled = None  # Response to ENABLE query
    self.antenna = 0  # Response to ANTENNA query
    self._msg_cb = msg_cb
    self._msg_cb_args = msg_cb_args
async def _send(self, sentence):
    """Fix up the checksum placeholder of *sentence* and transmit it.

    *sentence* is a mutable bytearray of the form b'$...*XX\r\n'; the two
    bytes before the trailing '\r\n' are overwritten with the hex NMEA
    checksum before the sentence is written to the UART.
    """
    # '{:02x}' zero-pads: the previous '{:2x}' emitted a leading *space*
    # for checksums below 0x10, producing an invalid NMEA sentence.
    bcrc = '{:02x}'.format(_crc(sentence)).encode()
    sentence[-4] = bcrc[0]  # Fix up CRC bytes
    sentence[-3] = bcrc[1]
    await self._swriter.awrite(sentence)
async def baudrate(self, value=9600):
if value not in (4800,9600,14400,19200,38400,57600,115200):
raise ValueError('Invalid baudrate {:d}.'.format(value)) |
sentence = bytearray('$PMTK251,{:d}*00\r\n'.format(value))
await self._send(sentence)
async def update_interval(self, ms=1000):
    """Set the GPS fix interval to *ms* milliseconds (100..10000) via PMTK220."""
    if ms < 100 or ms > 10000:
        raise ValueError('Invalid update interval {:d}ms.'.format(ms))
    sentence = bytearray('$PMTK220,{:d}*00\r\n'.format(ms))  # '00' fixed by _send
    await self._send(sentence)
    self._update_ms = ms  # Save for timing driver
async def enable(self, *, gll=0, rmc=1, vtg=1, gga=1, gsa=1, gsv=5, chan=0):
    """Configure per-sentence-type output rates via PMTK314.

    Each keyword is the rate field for that sentence type (0 disables).
    NOTE(review): 'chan' is assumed to be the channel-status field at
    position 19 -- confirm against the PMTK314 datasheet.
    """
    fstr = '$PMTK314,{:d},{:d},{:d},{:d},{:d},{:d},0,0,0,0,0,0,0,0,0,0,0,0,{:d}*00\r\n'
    sentence = bytearray(fstr.format(gll, rmc, vtg, gga, gsa, gsv, chan))
    await self._send(sentence)
async def command(self, cmd):
    """Send one of the predefined command packets.

    *cmd* is a module-level command constant (HOT_START, WARM_START, ...).
    Raises ValueError for an unrecognised command.
    """
    if cmd not in self.fixed_commands:
        # cmd is an int constant, so the previous '{:s}' format spec made
        # this raise a formatting error instead of the intended ValueError.
        raise ValueError('Invalid command {!r}.'.format(cmd))
    await self._swriter.awrite(self.fixed_commands[cmd])
# Expected responses: PMTK705 from VERSION, PMTK514 from ENABLE.
def parse(self, segs):
    """Handle proprietary (PMTK/PGTOP) response sentences.

    `segs` is the sentence split into fields (see the base-class parser).
    Recognised responses update instance state (.version, .enabled,
    .antenna) and fire the message callback.  Returns True when the
    sentence was consumed here, False to defer to standard NMEA parsing.
    """
    if segs[0] == 'PMTK705':  # Version response
        self.version = segs[1:]
        segs[0] = 'version'
        self._msg_cb(self, segs, *self._msg_cb_args)
        return True
    if segs[0] == 'PMTK514':  # Sentence-enable status
        print('enabled segs', segs)  # NOTE(review): leftover debug print?
        self.enabled = {'gll': segs[1], 'rmc': segs[2], 'vtg': segs[3],
                        'gga': segs[4], 'gsa': segs[5], 'gsv': segs[6],
                        'chan': segs[19]}
        segs = ['enabled', self.enabled]
        self._msg_cb(self, segs, *self._msg_cb_args)
        return True
    if segs[0] == 'PGTOP':  # Antenna status
        self.antenna = segs[2]
        segs = ['antenna', self.antenna]
        self._msg_cb(self, segs, *self._msg_cb_args)
        return True
    if segs[0][:4] == 'PMTK':
        # Any other PMTK acknowledgement: report it, do not interpret.
        self._msg_cb(self, segs, *self._msg_cb_args)
        return True
    return False
|
chbrandt/zyxw | eada/io/ascii.py | Python | gpl-2.0 | 9,744 | 0.018986 | """Module to read/write ascii catalog files (CSV and DS9)"""
##@package catalogs
##@file ascii_data
"""
The following functions are meant to help in reading and writing text CSV catalogs
as well as DS9 region files. Main structure used is dictionaries do deal with catalog
data in a proper way.
"""
import sys
import logging
import csv
import re
import string
# ---
def dict_to_csv(columns, fieldnames=[], filename='cat.csv', mode='w', delimiter=','):
    """
    Write a CSV catalog from the given dictionary contents.

    Each entry of 'fieldnames' selects a key of 'columns'; the associated
    value (list, tuple or scalar) becomes one CSV column.  Scalars are
    promoted to one-element lists, and columns shorter than the longest
    one are padded by cyclically repeating their own values.

    Input:
     - columns {str:[]} : Contents to write to the csv catalog
     - fieldnames [str] : Keys of 'columns' to write, in column order
     - filename str     : Name of csv catalog to write
     - mode str         : Write a new catalog, 'w', or append, 'a'
     - delimiter str    : Delimiter to use between columns in 'filename'

    Output:
     * If no error is raised, the file 'filename' is created/updated.
    ---
    """
    dictionary = columns.copy()
    if not fieldnames:
        fieldnames = list(dictionary.keys())
    for k in fieldnames:
        # Promote scalars to lists and take *private* copies of the
        # sequences: the padding below must not mutate the caller's data
        # (the shallow copy() above shares the list objects).  Copying
        # tuples to lists also makes them pad correctly instead of
        # crashing on tuple.extend().
        if isinstance(dictionary[k], (list, tuple)):
            dictionary[k] = list(dictionary[k])
        else:
            dictionary[k] = [dictionary[k]]
    logging.debug("Fields being written to (csv) catalog: %s", fieldnames)
    max_leng = max(len(dictionary[k]) for k in fieldnames)
    for k in fieldnames:
        leng = len(dictionary[k])
        if leng != max_leng:
            # Cyclic padding; may overshoot, but zip() truncates below.
            dictionary[k].extend(dictionary[k] * (max_leng - leng))
    # 'with' guarantees the catalog is flushed and closed on all paths.
    with open(filename, mode) as catFile:
        catObj = csv.writer(catFile, delimiter=delimiter)
        catObj.writerow(fieldnames)
        for _row in zip(*[dictionary[k] for k in fieldnames]):
            catObj.writerow(_row)
    return
# ---
def dict_from_csv(filename, fieldnames, header_lines=1, delimiter=',', dialect='excel'):
    """
    Read a CSV catalog and return a dictionary with the contents.

    dict_from_csv( filename, fieldnames, ...) -> {}

    Each column read from 'filename' is stored under the corresponding
    'fieldnames' entry (so len(fieldnames) is expected to match the number
    of columns).  The first 'header_lines' lines are taken as comments and
    are skipped.

    Input:
     - filename str     : Name of csv catalog to read
     - fieldnames [str] : Fieldnames to be read from catalog
     - header_lines int : Number of lines to remove from the head of 'filename'
     - delimiter str    : Delimiter to use between columns in 'filename'
     - dialect str      : CSV file fine structure (See help(csv) for more info)

    Output:
     - {*fieldnames} : one list of (string) values per fieldname
    ---
    """
    Dout = {k: [] for k in fieldnames}
    # 'with' guarantees the handle is closed (previously it leaked), and
    # the builtin next() replaces the Python-2-only file.next() method.
    with open(filename, 'r') as catFile:
        for _i in range(header_lines):
            next(catFile)  # discard header/comment line
        catObj = csv.DictReader(catFile, fieldnames,
                                delimiter=delimiter, dialect=dialect)
        for row in catObj:
            for k in fieldnames:
                Dout[k].append(row[k])
    return Dout
# ---
def write_ds9cat(x,y,size=20,marker='circle',color='red',outputfile='ds9.reg',filename='None'):
"""
Function to write a ds9 region file given a set of centroids
It works only with a circular 'marker' with fixed
radius for all (x,y) - 'centroids' - given.
Input:
- x : int | []
X-axis points
- y : int | []
Y-axis points
- size : int | []
- marker : str | [str]
- outputfile : str | [str]
Output:
<bool>
Example:
>>> write_ds9cat(x=100,y=100,outputfile='test.reg')
>>>
>>> import os
>>> s = os.system('cat test.reg')
# Region file format: DS9 version 4.1
# Filename: None
global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1
image
circle(100,100,20) # color=red
>>>
>>>
>>>
>>> write_ds9cat(x=[1,2],y=[0,3],outputfile='test.reg',size=[10,15],marker=['circle','box'])
>>>
>>> s = os.system('cat test.reg')
# Region file format: DS9 version 4.1
# Filename: None
global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1
image
circle(1,0,10) # color=red
box(2,3,15,15,0) # color=red
>>>
"""
try:
if len(x) != len(y):
sys.stderr.write("X and Y lengths do not math. Check their sizes.")
return False;
except:
x = [x];
y = [y];
centroids = zip(x,y);
length = len(centroids);
# Lets verify if everyone here is a list/tuple:
#
try:
len(size);
except TypeError:
size = [size];
_diff = max(0,length-len(size))
if _diff:
size.extend([ size[-1] for i in range(0,_diff+1) ]);
#
if type(marker) == type(str()):
marker = [marker];
_diff = max(0,length-len(marker))
if _diff:
marker.extend([ marker[-1] for i in range(0,_diff+1) ]);
#
if type(color) == type(str()):
color = [color];
_diff = max(0,length-len(color))
if _diff:
color.extend([ color[-1] for i in range(0,_diff+1) ]);
output = open(outputfile,'w');
# DS9 region file header
output.write("# Region file format: DS9 version 4.1\n");
output.write("# Filename: %s\n" % (filename));
output.write("global color=green dashlist=8 3 width=1 font=\"helvetica 10 normal\" ");
output.write("select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n");
output.write("image\n");
for i in range(length):
if marker[i] == 'circle':
output.write("circle(%s,%s,%s) # color | =%s\n" % (x[i],y[i],size[i],color[i]));
elif marker[i] == 'box':
output.write("box(%s,%s,%s,%s,0) # color=%s\n" % (x[i],y[i],size[i],size[i],color[i]));
output.close();
return
# ---
|
def read_ds9cat(regionfile):
""" Function to read ds9 region file
Only regions marked with a 'circle' or 'box' are read.
'color' used for region marks (circle/box) are given as
output together with 'x','y','dx','dy' as list in a
dictionary. The key 'image' in the output (<dict>) gives
the filename in the 'regionfile'.
Input:
- regionfile : ASCII (ds9 format) file
Output:
-> {'image':str,'x':[],'y':[],'size':[],'marker':[],'color':[]}
Example:
>>> write_ds9cat(x=[1,2],y=[0,3],outputfile='test.reg',size=[10,15])
>>>
>>> D = read_ds9cat('test.reg')
>>>
"""
D_out = {'filename':'', 'marker':[], 'color':[], 'x':[], 'y':[], 'size':[]};
fp = open(regionfile,'r');
for line in fp.readlines():
if (re.search("^#",line)):
if (re.search("Filename",line)):
imagename |
joaander/hoomd-blue | hoomd/jit/__init__.py | Python | bsd-3-clause | 703 | 0.007112 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
""" JIT
The JIT module provides *experimental* support to to JIT (just in time) compile C++ code and call it during the
simulation. Compiled C++ code will execute at full performance unlike interpreted python code.
.. rubric:: Stability
:py:mod:`hoomd.jit` i | s **unstable**. When upgrading from version 2.x to 2.y (y > x),
existing job scripts may need to be updated. **Maintainer:** Joshua A. Anderson, University of Michigan
.. versionadded:: 2.3
""" |
from hoomd.hpmc import _hpmc
from hoomd.jit import patch
from hoomd.jit import external
|
valkyriesavage/makers-marks | openscad.py | Python | mit | 20,842 | 0.016937 | import os, subprocess
from component import Component
from config import OPENSCAD, OPENSCAD_OLD, CHECK_INTERSECT_SCRIPT, SUB_COMPONENT_SCRIPT, \
PART_SCRIPT, BOSS_CHECK_COMPS_SCRIPT, BOSS_PUT_SCRIPT, SHELL_SCRIPT, DEFORM_SHELL_SCRIPT, \
MINKOWSKI_TOP, MINKOWSKI_BOT, BUTTON_CAP_SCRIPT, SCRATCH
'''
We will give the following to this part of the pipeline:
list of dictionaries of components with format above
STL file of scanned object
We expect to receive the following from this part of the pipeline:
1st time : model w/o intersecting components & w/ enough space for components
2nd time : two files with subbed comps, bosses, and parting lines
'''
def callOpenSCAD(script, oname, otherargs='', allow_empty=False):
    '''
    This function calls an openSCAD script and writes its output STL to a file.
    If "allow_empty" is true, it permits an empty STL to be generated; it
    returns True when the output is empty and False otherwise (or raises if
    openSCAD reports anything else on its first output line).

    NOTE(review): in the allow_empty branch the Popen pipe is not closed on
    the two early returns, and shell=True with a joined string will break on
    paths containing spaces/shell metacharacters -- confirm inputs are
    controlled.
    '''
    call = [OPENSCAD, '-o', oname, script]
    if not otherargs == '':
        call = [OPENSCAD, '-o', oname, otherargs, script]
    # this will throw an exception if the call fails for some reason
    if allow_empty:
        proc = subprocess.Popen(' '.join(call),shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,close_fds=True)
        line = proc.stdout.readline()  # only the first line is inspected
        if "Current top level object is empty." in line:
            return True
        if line == '':
            return False
        proc.terminate()
        raise Exception(' '.join(['call failed : ', ' '.join(call), ", message", line]))
    else:
        subprocess.check_call(call)
    return False
def createsEmptySTL(script, oname, otherargs=''):
    '''Run 'script' via callOpenSCAD in allow_empty mode; True if the STL is empty.'''
    return callOpenSCAD(script, oname, otherargs, allow_empty=True)
def placeCompOpenSCAD(component, geom):
    '''
    This function generates openSCAD code for placing a component.  The
    component dict must carry 'coords', 'rotations', 'axis' and 'type'
    keys; 'geom' selects which of the component's STL variants is imported
    (e.g. "add", "sub", "clearance").
    '''
    # %-interpolation below pulls coords/rotations/axis from the dict.
    output = '''
    translate(%(coords)s) {
      rotate(%(rotations)s) {
        rotate(%(axis)s) {
          import("stls/'''+Component.toStr(component['type'])+'-'+geom+'''.stl");
        }
      }
    }
    '''
    return output % component
def placeBoundingBoxOpenSCAD(component, geom):
    '''
    This creates openSCAD code to add space for some components, defined by
    their bounding box. That is, if a component doesn't quite fit, we pop
    out some extra space for it.

    NOTE(review): the 180-degree flip and the 7.5 z-offset look tuned to a
    specific component STL set -- confirm before reuse.
    '''
    output = '''
    translate(%(coords)s) {
      rotate(%(rotations)s) {
        rotate(%(axis)s)rotate([180,0,0])translate([0,0,7.5]) {
          import("stls/'''+Component.toStr(component['type'])+'-'+geom+'''.stl");
        }
      }
    }
    '''
    return output % component
def placeBossOpenSCAD(boss, geom='add', topbot=''):
    '''
    This places a single boss. It can place either the top or bottom part of it.

    NOTE(review): this mutates the caller's dict -- the 'geom'/'topbot'
    keys persist after the call.
    '''
    boss['geom'] = geom
    boss['topbot'] = topbot
    text = '''
    translate(%(coords)s) {
      rotate(%(rotations)s) {
        translate(%(offset)s) {
          import("stls/boss-%(geom)s%(topbot)s.stl");
        }
      }
    }
    ''' % boss
    return text
def placePLineOpenSCAD(pline_comp):
    '''
    This is a small wrapper for placeCompOpenSCAD which returns openSCAD code
    for correctly orienting a parting line (rotated 90 degrees about x and y).

    NOTE(review): mutates the caller's dict ('axis' and 'rotations'), and
    the bare print below is a leftover Python 2 debug statement.
    '''
    pline_comp['axis'] = 0
    [x,y,z] = list(pline_comp['rotations'])
    print x,y,z
    pline_comp['rotations'] = [x+90,y+90,z]
    return placeCompOpenSCAD(pline_comp, geom='woo')
def internalOnly(geometry, body):
    '''
    This returns openSCAD code that will add only the intersecting parts of
    geometry and body.

    geometry -- a string of openSCAD statements
    body     -- path of an STL file to import
    '''
    return '''
    intersection() {
    '''+geometry+'\n'+'''
      import("'''+body+'''");
    }
    '''
def internalOnlyBoundingBox(geometry, body, component):  # need to intersect the solid bb as well
    '''
    This returns OpenSCAD code that will add only the intersection of a
    component's bounding box and a body geometry.  The body is unioned with
    the component's solid and shelled bounding boxes before intersecting.
    '''
    return '''
    intersection() {
    '''+geometry+'\n'+'''
      union() { import("'''+body+'''");
      '''+placeBoundingBoxOpenSCAD(component, geom='bbsolid')+placeBoundingBoxOpenSCAD(component, geom='bbshell')+'''
      }
    }
    '''
def writeOpenSCAD(script, components={}, object_body='', deflated='',
full_body='', top='', boss=None, bosses=[], topbot='',
debug=False):
'''
This horrifying function actually puts together the scripts necessary to do
the openSCAD operations that Makers' Marks relies on. It can write scripts
for deforming shells, checking intersections, and more, based on the
script name passed in. It's bad, though.
'''
text = 'union() {\n'
if script == DEFORM_SHELL_SCRIPT:
text += '''
difference() {
import("%(obj_body)s");
%(solid_bb_clearance)s
}
''' % {
'obj_body':object_body,
'solid_bb_clearance':placeBoundingBoxSCAD(components, geom='bbsolid')
} #subtracts translated solid bounding box from body
text += '''
difference() {
rotate([180,0,0])translate([0,0,7.5])%(shelled_bb)s
import("%(solid_obj_body)s");
}
''' | % {
'shelled_bb':placeBoundingBoxSCAD(components, geom='bbshell'),
'solid_obj_body':full_body
} #subtracts solid body from hollow bounding box
if script == CHECK_INTERSECT_SCRIPT and object_body == '':
text += '''
intersection() {
%(comp_0)s
%(comp_1)s
}
''' % {
'comp_0':placeCompOpenSCAD(components[0], geom='clearance'),
'comp_1':placeCompOpenSCAD(components[1], geom='clearance'),
}
if script == CHECK_INTERSECT_SCRIPT and not object_body == '':
| text += '''
intersection() {
%(comp_0)s
import("%(obj)s");
}
''' % {
'comp_0':placeCompOpenSCAD(components[0], geom='clearance'),
'obj':object_body,
}
if script == SUB_COMPONENT_SCRIPT:
comps_sub = ''
comps_add = ''
for component in components:
if component['type'] == Component.parting_line or component['type'] == Component.parting_line_calculated:
# these will be dealt with in a special step later
continue
comps_sub += placeCompOpenSCAD(component, geom='sub')
if Component.no_trim(component['type']):
comps_add += placeCompOpenSCAD(component, geom='add')
elif component['type'] in pushed_comp:
comps_add += internalOnlyBoundingBox(placeCompOpenSCAD(component, geom='add'),
full_body, component)
else:
comps_add += internalOnly(placeCompOpenSCAD(component, geom='add'),
full_body)
text += '''
difference() {
\timport("%(obj)s");
// first we need to subtract everything
\t%(comps_sub)s
}
// now we add mounting points back in (they are cut to size of the body)
%(comps_add)s
''' % {
'obj':object_body,
'comps_sub':comps_sub,
'comps_add':comps_add,
}
if script == PART_SCRIPT:
pline = 'cube(1);'
for comp in components:
if comp['type'] is Component.parting_line_calculated:
pline = placePLineOpenSCAD(comp)
break
if pline == '':
print 'wtf? no parting line?'
if top == True:
text += '''
difference(){
\timport("%(obj)s");
\t%(pline)s
}
''' % {
'obj':object_body,
'pline':pline,
}
if top == False:
text += '''
intersection(){
\timport("%(obj)s");
\t%(pline)s
}
''' % {
'obj':object_body,
'pline':pline,
}
if script == BUTTON_CAP_SCRIPT:
print "cutting button caps by ", components['offset']
text += '''
difference() {
import("stls/button-cap.stl");
translate([0,0,-%(z)s])import("stls/button-cap-sub.stl");
}
''' % {
'z':components['offset'],
}
if script == BOSS_CHECK_COMPS_SCRIPT:
all_comp_union = 'union(){'
for comp in components:
placecomp = placeCompOpenSCAD(comp, geom='clearance')
all_comp_union += (placecomp)
all_comp_union += '}'
text += '''
intersection() {
\t%(boss)s
\t%(comps)s
}
''' % {
'boss':placeBossOpenSCAD(boss,'add'),
'comps':all_comp_union,
}
if script == BOSS_PUT_SCRIPT:
bosses_add = ''
bosses_sub = ''
for boss in bosses:
bosses_add += placeBossOpenSCAD(boss,'add',topbot)
bosses_sub += placeBossOpenSCAD(boss,'sub',topbot)
text += '''
difference() {
// add together bosses a |
MatthewShao/mitmproxy | pathod/language/base.py | Python | mit | 13,851 | 0.000433 | import operator
import os
import abc
import functools
import pyparsing as pp
from mitmproxy.utils import strutils
from mitmproxy.utils import human
import typing # noqa
from . import generators
from . import exceptions
class Settings:

    """
    Bag of runtime options consulted while building and transmitting
    messages. Purely a value holder; every option is stored verbatim.
    """

    def __init__(
        self,
        is_client=False,
        staticdir=None,
        unconstrained_file_access=False,
        request_host=None,
        websocket_key=None,
        protocol=None,
    ):
        # TODO: refactor websocket_key into the protocol
        options = (
            ("is_client", is_client),
            ("staticdir", staticdir),
            ("unconstrained_file_access", unconstrained_file_access),
            ("request_host", request_host),
            ("websocket_key", websocket_key),
            ("protocol", protocol),
        )
        for name, value in options:
            setattr(self, name, value)
# Optional ":" separator; suppressed so it never appears in parse results.
Sep = pp.Optional(pp.Literal(":")).suppress()
# A run of digits, converted to an int by the parse action.
v_integer = pp.Word(pp.nums)\
    .setName("integer")\
    .setParseAction(lambda toks: int(toks[0]))
# A double- or single-quoted (possibly multiline) string; the surrounding
# quotes are stripped from the parsed value.
v_literal = pp.MatchFirst(
    [
        pp.QuotedString(
            "\"",
            unquoteResults=True,
            multiline=True
        ),
        pp.QuotedString(
            "'",
            unquoteResults=True,
            multiline=True
        ),
    ]
)
# A quoted literal, or a bare word made of printable characters excluding
# the language's own delimiters (comma, colon, newline, "@" and quotes).
v_naked_literal = pp.MatchFirst(
    [
        v_literal,
        pp.Word("".join(i for i in pp.printables if i not in ",:\n@\'\""))
    ]
)
class Token:

    """
    Base class for tokens of the specification language.

    Tokens are immutable and carry no meaning on their own; they are
    assembled into Components and Actions to form the language proper.
    """
    __metaclass__ = abc.ABCMeta

    @classmethod
    def expr(cls):  # pragma: no cover
        """Return the pyparsing expression used to parse this token."""
        return None

    @abc.abstractmethod
    def spec(self):  # pragma: no cover
        """Return a parseable specification string for this token."""
        return None

    @property
    def unique_name(self) -> typing.Optional[str]:
        """
        Name used to enforce uniqueness constraints: no two tokens with
        the same name are allowed in one message. Return None to opt out
        of the check.
        """
        return type(self).__name__.lower()

    def resolve(self, settings_, msg_):
        """
        Ready the token for transmission, fixing the calculated offsets
        of actions. ``settings_`` is a language.Settings instance and
        ``msg_`` the containing message; returns the resolved token.
        """
        return self

    def __repr__(self):
        return self.spec()
class _TokValueLiteral(Token):

    """Shared behaviour for literal value tokens."""

    def __init__(self, val):
        # Decode Python-style escape sequences once, at construction time.
        self.val = strutils.escaped_str_to_bytes(val)

    def freeze(self, settings_):
        """A literal is already fully determined."""
        return self

    def get_generator(self, settings_):
        """Literals generate exactly their stored bytes."""
        return self.val
class TokValueLiteral(_TokValueLiteral):

    """
    A quoted literal value using Python-style string escaping.
    """

    @classmethod
    def expr(cls):
        e = v_literal.copy()
        return e.setParseAction(cls.parseAction)

    @classmethod
    def parseAction(cls, x):
        return cls(*x)

    def spec(self):
        # Escape single quotes so the result can safely be single-quoted.
        body = strutils.bytes_to_escaped_str(self.val).replace(r"'", r"\x27")
        return "'{}'".format(body)
class TokValueNakedLiteral(_TokValueLiteral):

    """
    A literal that may appear without surrounding quotes; escaping rules
    still apply.
    """

    @classmethod
    def expr(cls):
        # Fixed: a stray " |" separator had garbled this method header.
        e = v_naked_literal.copy()
        return e.setParseAction(lambda x: cls(*x))

    def spec(self):
        # Single quotes must be escaped since the value itself is unquoted.
        return strutils.bytes_to_escaped_str(self.val, escape_single_quotes=True)
class TokValueGenerate(Token):

    """
    A generated value: "@<size>[<unit>][,<datatype>]", e.g. "@100k,ascii".
    """

    def __init__(self, usize, unit, datatype):
        # Fixed: a stray "| " separator had garbled this line. An empty
        # unit defaults to bytes ("b").
        if not unit:
            unit = "b"
        self.usize, self.unit, self.datatype = usize, unit, datatype

    def bytes(self):
        """Total size in bytes: usize scaled by the size unit."""
        return self.usize * human.SIZE_UNITS[self.unit]

    def get_generator(self, settings_):
        """Return a random generator producing self.bytes() of datatype."""
        return generators.RandomGenerator(self.datatype, self.bytes())

    def freeze(self, settings):
        # Freezing samples the generator once and pins the output as a
        # literal, so the value no longer changes between transmissions.
        g = self.get_generator(settings)
        return TokValueLiteral(strutils.bytes_to_escaped_str(g[:], escape_single_quotes=True))

    @classmethod
    def expr(cls):
        e = pp.Literal("@").suppress() + v_integer
        # Optional size unit, e.g. k/m/g; must follow the number directly.
        u = functools.reduce(
            operator.or_,
            [pp.Literal(i) for i in human.SIZE_UNITS.keys()]
        ).leaveWhitespace()
        e = e + pp.Optional(u, default=None)
        # Optional ",<datatype>" suffix; defaults to raw bytes.
        s = pp.Literal(",").suppress()
        s += functools.reduce(
            operator.or_,
            [pp.Literal(i) for i in generators.DATATYPES.keys()]
        )
        e += pp.Optional(s, default="bytes")
        return e.setParseAction(lambda x: cls(*x))

    def spec(self):
        # Omit defaults ("b" unit, "bytes" datatype) for a minimal spec.
        s = "@%s" % self.usize
        if self.unit != "b":
            s += self.unit
        if self.datatype != "bytes":
            s += ",%s" % self.datatype
        return s
class TokValueFile(Token):

    # A value read from a file on disk, written "<'path'" in specs.

    def __init__(self, path):
        self.path = str(path)

    @classmethod
    def expr(cls):
        # "<" introduces a file value; the path itself is a naked literal.
        e = pp.Literal("<").suppress()
        e = e + v_naked_literal
        return e.setParseAction(lambda x: cls(*x))

    def freeze(self, settings_):
        # File values stay file-backed; the content is read at send time.
        return self

    def get_generator(self, settings):
        """
        Return a FileGenerator for the configured path.

        Raises exceptions.FileAccessDenied when file access is disabled,
        when the resolved path escapes the configured static directory
        (unless unconstrained access is enabled), or when the target is
        not a regular file.
        """
        if not settings.staticdir:
            raise exceptions.FileAccessDenied("File access disabled.")
        s = os.path.expanduser(self.path)
        # Normalise to an absolute path rooted in staticdir so ".."
        # components cannot silently escape it.
        s = os.path.normpath(
            os.path.abspath(os.path.join(settings.staticdir, s))
        )
        uf = settings.unconstrained_file_access
        # Containment check: the resolved path must stay inside staticdir.
        if not uf and not s.startswith(os.path.normpath(settings.staticdir)):
            raise exceptions.FileAccessDenied(
                "File access outside of configured directory"
            )
        if not os.path.isfile(s):
            raise exceptions.FileAccessDenied("File not readable")
        return generators.FileGenerator(s)

    def spec(self):
        return "<'%s'" % self.path
# Any quoted value form: generated, file-backed, or escaped literal.
TokValue = pp.MatchFirst(
    [
        TokValueGenerate.expr(),
        TokValueFile.expr(),
        TokValueLiteral.expr()
    ]
)
# As TokValue, but additionally accepts an unquoted (naked) literal.
TokNakedValue = pp.MatchFirst(
    [
        TokValueGenerate.expr(),
        TokValueFile.expr(),
        TokValueLiteral.expr(),
        TokValueNakedLiteral.expr(),
    ]
)
# An action offset: an integer position or one of the markers "r" / "a"
# (interpreted by the action classes; not defined in this module).
TokOffset = pp.MatchFirst(
    [
        v_integer,
        pp.Literal("r"),
        pp.Literal("a")
    ]
)
class _Component(Token):

    """
    A value component of the primary specification of a message.
    Components produce the byte values that make up the message.
    """

    def values(self, settings):  # pragma: no cover
        """
        Return a sequence of values; each entry is either a bytestring
        or a generator.
        """
        pass

    def string(self, settings=None):
        """
        Render the component as a single bytestring.
        """
        parts = self.values(settings or {})
        return b"".join(part[:] for part in parts)
class KeyValue(_Component):

    """
    A key/value pair introduced by the class-level ``preamble`` leader.
    """

    def __init__(self, key, value):
        self.key = key
        self.value = value

    @classmethod
    def expr(cls):
        e = (
            pp.Literal(cls.preamble).suppress()
            + TokValue
            + pp.Literal("=").suppress()
            + TokValue
        )
        return e.setParseAction(lambda x: cls(*x))

    def spec(self):
        return "{}{}={}".format(
            self.preamble, self.key.spec(), self.value.spec()
        )

    def freeze(self, settings):
        frozen_key = self.key.freeze(settings)
        frozen_value = self.value.freeze(settings)
        return self.__class__(frozen_key, frozen_value)
class CaselessLiteral(_Component):

    """
    A token that matches exactly one fixed value (cls.TOK),
    case-insensitively.
    """

    def __init__(self, value):
        self.value = value

    @classmethod
    def expr(cls):
        return pp.CaselessLiteral(cls.TOK).setParseAction(
            lambda x: cls(*x)
        )

    def values(self, settings):
        return self.TOK

    def spec(self):
        return self.TOK

    def freeze(self, settings_):
        # Nothing variable to pin down.
        return self
class OptionsOrValue(_Component):
"""
Can be any of a specified set of options, or a value specifier.
"""
preamble = ""
options = [] # type: typing.List[str]
def __init__(self, valu |
IKeiran/FPT-Sinyakov | test/test_contact_compare.py | Python | apache-2.0 | 993 | 0.005035 | __author__ = 'Keiran'
from model.contact | import Contact
im | port pytest
def test_contact_compare(app, orm):
    """Contacts read from the DB must match those shown on the home page."""
    with pytest.allure.step('Given a sorted contact list from DB'):
        db_contacts = sorted(orm.get_contact_list(), key=Contact.id_or_max)
    with pytest.allure.step('Given a sorted contact list from home page'):
        ui_contacts = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
    with pytest.allure.step('Then I compare this lists'):
        # Compare pairwise after sorting both lists by the same key.
        for position, db_contact in enumerate(db_contacts):
            ui_contact = ui_contacts[position]
            assert db_contact == ui_contact
            assert db_contact.join_mails() == ui_contact.all_mails
            assert db_contact.join_phones() == ui_contact.all_phones
|
ferventdesert/Hawk-Projects | 安居客/安居客.py | Python | apache-2.0 | 2,402 | 0.081843 | # coding=utf-8
#下面的代码是接口函数,无关
def get(ar, index):
    """Index into ``ar``; negative indices count from the end (explicitly)."""
    size = len(ar)
    if index < 0:
        # Mirror Python's negative indexing by hand, as the original did.
        return ar[size + index]
    return ar[index]
def find(ar, filter):
    """Return the first element of ``ar`` accepted by ``filter``, else None.

    Fixed: a stray " | " dataset separator had split the parameter name
    (``filt | er``). The name ``filter`` shadows the builtin but is kept
    for interface compatibility with existing callers.
    """
    for r in ar:
        if filter(r):
            return r
    return None
def execute(ar, filter, action):
    """Apply ``action`` to every element of ``ar`` accepted by ``filter``."""
    for item in ar:
        if not filter(item):
            continue
        action(item)
# Everything below configures Hawk ETL flows. The Chinese-named module
# objects (安居客核心流程 etc.) are injected by the Hawk host at runtime.

# Disable the first ETL step of every sub-flow so it only runs when invoked
# from the core flow, not standalone.
unabled = [户型图存储方案, 户型图存储, 安居客户型列表, 安居客评价, 安居客楼盘详情, 相册存储方案, 安居客相册]
for e in unabled:
    e.etls[0].Enabled = False

# Locate the paging-range control in the core flow.
# Fixed: a stray " | " dataset separator had garbled this call.
页数范围控制 = find(安居客核心流程.etls, lambda x: x.TypeName == '数量范围选择')

# Configuration you may want to edit:
###################################################
# Retry count per request (Hawk expects this as a string).
重试次数 = '3'
# Number of result pages to skip; note this counts page turns.
页数范围控制.Skip = 0
# Number of pages to fetch; set very large to run through to the end.
页数范围控制.Take = 20000000
debug = False
# Incremental crawl? The system checks the database for existing rows, so
# while debugging you may see no output (it is all in the DB already).
# If duplicates are acceptable, or for debug observation, set not_repeat.
not_repeat = True

def work2(x):
    # Toggle the de-duplication step according to not_repeat.
    x.Enabled = not_repeat

def work(x):
    # Apply the retry count to crawler-conversion steps.
    x.MaxTryCount = 重试次数

execute(安居客核心流程.etls, lambda x: x.TypeName == '从爬虫转换', work)
execute(安居客核心流程.etls, lambda x: x.Name == '防重复', work2)
# NOTE(review): this repeats the first execute() call above — possibly
# redundant in the original; kept to preserve behaviour.
execute(安居客核心流程.etls, lambda x: x.TypeName == '从爬虫转换', work)
get(安居客核心流程.etls, -2).Enabled = not debug
# Whether to store the full JSON payload in the database.
get(安居客核心流程.etls, -3).Enabled = False
# Whether to save album images; album paths are stored in the DB either way.
get(安居客相册.etls, -1).Enabled = True
# Whether to save floor plans; their paths are stored in the DB either way.
get(户型图存储.etls, -1).Enabled = True
# Cities to crawl, as a regular expression; use '' to include all cities.
get(安居客城市.etls, -1).Script = '锦州|景德镇|吉安|济宁|金华|揭阳|晋中|九江|焦作|晋城|荆州|佳木斯|酒泉|鸡西|济源|金昌|嘉峪关'
# Storage path template for floor-plan images.
get(户型图存储方案.etls, -4).Format = 'D:\安居客图片\{0}\户型图\{1}_{2}_{3}.jpg'
# Storage path template for album images.
get(相册存储方案.etls, -4).Format = 'D:\安居客图片\{0}\相册\{1}_{2}_{3}.jpg'
MKTCloud/MKTCloud | openstack_dashboard/dashboards/support/support/views.py | Python | apache-2.0 | 497 | 0.006036 | import log | ging
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard.dashboards.support.support import tables as project_tables
from horizon import views
class IndexView(tables.DataTableView):
    """Table view for the support panel index page.

    Fixed: a stray "| " dataset separator had garbled the body of
    get_data().
    """
    table_class = project_tables.SupportTable
    template_name = 'support/support/index.html'

    def get_data(self):
        # No support resources are loaded yet; the table renders empty.
        resource = []
        return resource
|
jamesmarva/myria | jsonQueries/case_conversion/generate_convert_zsh.py | Python | bsd-3-clause | 761 | 0.00657 | #!/usr/bin/env python
def fix(word):
    """Convert a snake_case keyword to camelCase.

    Each underscore is dropped and the character following it is
    upper-cased, e.g. "max_value" -> "maxValue". A trailing underscore
    is simply dropped.

    Fixed: a stray "| " dataset separator had corrupted the ``ret += c``
    line, breaking the function.
    """
    ret = ""
    upper_next = False
    for c in word:
        if c == '_':
            upper_next = True
            continue
        if upper_next:
            c = c.upper()
            upper_next = False
        ret += c
    return ret
# Read the keyword list, one keyword per line, skipping blank lines.
words = [line.strip() for line in open('keywords.txt', 'r') if len(line.strip())]
# Camel-case each keyword with fix() above.
fixed = [fix(word) for word in words]
# Build sed expressions rewriting double- and single-quoted occurrences.
sed_dblqt_expr = [r"s#\"{}\"#\"{}\"#g".format(w,f) for w, f in zip(words, fixed)]
sed_sglqt_expr = [r"s#'{}'#'{}'#g".format(w,f) for w, f in zip(words, fixed)]
# Assemble a zsh loop applying every substitution to all source files.
front = """for file in **/*.json **/*.py **/*.java
do
sed -i bak -e \""""
middle = '\" \\\n -e \"'.join(sed_dblqt_expr + sed_sglqt_expr)
end = """\" $file
echo $file
done"""
# Python 2 print statement: this script targets Python 2.
print "%s%s%s" % (front, middle, end)
|
tremblerz/breach-detection-system | dashboard/bds/admin.py | Python | gpl-3.0 | 206 | 0.004854 | from | django.contrib import admin
from bds.models import Packet
from bds.models import CT,admins
# Register your mode | ls here.
admin.site.register(Packet)
admin.site.register(CT)
admin.site.register(admins)
|
badlogicmanpreet/nupic | tests/unit/nupic/encoders/random_distributed_scalar_test.py | Python | agpl-3.0 | 19,742 | 0.004306 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from cStringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import numpy
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.support.unittesthelpers.algorithm_test_helpers import getSeed
from nupic.encoders.random_distributed_scalar import (
RandomDistributedScalarEncoder
)
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.random_distributed_scalar_capnp import (
RandomDistributedScalarEncoderProto
)
# Disable warnings about accessing protected members
# pylint: disable=W0212
def computeOverlap(x, y):
  """
  Return the overlap of two binary arrays: the number of positions i
  where both x[i] and y[i] are 1.
  """
  both_on = x & y
  return both_on.sum()
def validateEncoder(encoder, subsampling):
  """
  Check overlap statistics for index pairs (i, j) with i < j, subsampling
  the j values for speed. Return False on the first failing pair, else
  True.
  """
  upper = encoder.maxIndex + 1
  for i in range(encoder.minIndex, upper):
    for j in range(i + 1, upper, subsampling):
      if not encoder._overlapOK(i, j):
        return False
  return True
class RandomDistributedScalarEncoderTest(unittest.TestCase):
"""
Unit tests for RandomDistributedScalarEncoder class.
"""
def testEncoding(self):
"""
Test basic encoding functionality. Create encodings without crashing and
check they contain the correct number of on and off bits. Check some
encodings for expected overlap. Test that encodings for old values don't
change once we generate new buckets.
"""
# Initialize with non-default parameters and encode with a number close to
# the offset
encoder = RandomDi | stributedScalarEncoder(name="encoder", resolution=1.0,
w=23, n=500, offset=0.0)
e0 = encoder.encode(-0.1)
self.a | ssertEqual(e0.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e0.size, 500, "Width of the vector is incorrect")
self.assertEqual(encoder.getBucketIndices(0.0)[0], encoder._maxBuckets / 2,
"Offset doesn't correspond to middle bucket")
self.assertEqual(len(encoder.bucketMap), 1, "Number of buckets is not 1")
# Encode with a number that is resolution away from offset. Now we should
# have two buckets and this encoding should be one bit away from e0
e1 = encoder.encode(1.0)
self.assertEqual(len(encoder.bucketMap), 2, "Number of buckets is not 2")
self.assertEqual(e1.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e1.size, 500, "Width of the vector is incorrect")
self.assertEqual(computeOverlap(e0, e1), 22, "Overlap is not equal to w-1")
# Encode with a number that is resolution*w away from offset. Now we should
# have many buckets and this encoding should have very little overlap with
# e0
e25 = encoder.encode(25.0)
self.assertGreater(len(encoder.bucketMap), 23,
"Number of buckets is not 2")
self.assertEqual(e25.sum(), 23, "Number of on bits is incorrect")
self.assertEqual(e25.size, 500, "Width of the vector is incorrect")
self.assertLess(computeOverlap(e0, e25), 4, "Overlap is too high")
# Test encoding consistency. The encodings for previous numbers
# shouldn't change even though we have added additional buckets
self.assertTrue(numpy.array_equal(e0, encoder.encode(-0.1)),
"Encodings are not consistent - they have changed after new buckets "
"have been created")
self.assertTrue(numpy.array_equal(e1, encoder.encode(1.0)),
"Encodings are not consistent - they have changed after new buckets "
"have been created")
def testMissingValues(self):
"""
Test that missing values and NaN return all zero's.
"""
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
empty = encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(empty.sum(), 0)
empty = encoder.encode(float("nan"))
self.assertEqual(empty.sum(), 0)
def testResolution(self):
"""
Test that numbers within the same resolution return the same encoding.
Numbers outside the resolution should return different encodings.
"""
encoder = RandomDistributedScalarEncoder(name="encoder", resolution=1.0)
# Since 23.0 is the first encoded number, it will be the offset.
# Since resolution is 1, 22.9 and 23.4 should have the same bucket index and
# encoding.
e23 = encoder.encode(23.0)
e23p1 = encoder.encode(23.1)
e22p9 = encoder.encode(22.9)
e24 = encoder.encode(24.0)
self.assertEqual(e23.sum(), encoder.w)
self.assertEqual((e23 == e23p1).sum(), encoder.getWidth(),
"Numbers within resolution don't have the same encoding")
self.assertEqual((e23 == e22p9).sum(), encoder.getWidth(),
"Numbers within resolution don't have the same encoding")
self.assertNotEqual((e23 == e24).sum(), encoder.getWidth(),
"Numbers outside resolution have the same encoding")
e22p9 = encoder.encode(22.5)
self.assertNotEqual((e23 == e22p9).sum(), encoder.getWidth(),
"Numbers outside resolution have the same encoding")
def testMapBucketIndexToNonZeroBits(self):
"""
Test that mapBucketIndexToNonZeroBits works and that max buckets and
clipping are handled properly.
"""
encoder = RandomDistributedScalarEncoder(resolution=1.0, w=11, n=150)
# Set a low number of max buckets
encoder._initializeBucketMap(10, None)
encoder.encode(0.0)
encoder.encode(-7.0)
encoder.encode(7.0)
self.assertEqual(len(encoder.bucketMap), encoder._maxBuckets,
"_maxBuckets exceeded")
self.assertTrue(
numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(-1),
encoder.bucketMap[0]),
"mapBucketIndexToNonZeroBits did not handle negative"
" index")
self.assertTrue(
numpy.array_equal(encoder.mapBucketIndexToNonZeroBits(1000),
encoder.bucketMap[9]),
"mapBucketIndexToNonZeroBits did not handle negative index")
e23 = encoder.encode(23.0)
e6 = encoder.encode(6)
self.assertEqual((e23 == e6).sum(), encoder.getWidth(),
"Values not clipped correctly during encoding")
ep8 = encoder.encode(-8)
ep7 = encoder.encode(-7)
self.assertEqual((ep8 == ep7).sum(), encoder.getWidth(),
"Values not clipped correctly during encoding")
self.assertEqual(encoder.getBucketIndices(-8)[0], 0,
"getBucketIndices returned negative bucket index")
self.assertEqual(encoder.getBucketIndices(23)[0], encoder._maxBuckets-1,
"getBucketIndices returned bucket index that is too"
" large")
def testParameterChecks(self):
"""
Test that some bad construction parameters get handled.
"""
# n must be >= 6*w
with self.assertRaises(ValueError):
RandomDistributedScalarEncoder(name="mv", resolution=1.0, n=int(5.9*21))
# n must be an int
with self.assertRaises(Value |
rackerlabs/django-DefectDojo | dojo/management/commands/system_settings.py | Python | bsd-3-clause | 1,040 | 0 | from django.core.management.base import BaseCommand
from dojo.models import Sys | tem_Settings
class Command(BaseCommand):
    help = 'Updates product grade calculation'

    def handle(self, *args, **options):
        """Store the product-grade formula source in System_Settings.

        The source text below is persisted (not executed here) and later
        evaluated by the application to grade a product from its counts
        of critical/high/medium/low findings.

        Fixed: a stray "| " dataset separator had replaced the
        indentation of the nested ``if health == 100:`` line in the
        ``high`` branch, which would make the stored code unparseable;
        restored to match the parallel ``med``/``low`` branches.
        """
        code = """def grade_product(crit, high, med, low):
    health=100
    if crit > 0:
        health = 40
        health = health - ((crit - 1) * 5)
    if high > 0:
        if health == 100:
            health = 60
        health = health - ((high - 1) * 3)
    if med > 0:
        if health == 100:
            health = 80
        health = health - ((med - 1) * 2)
    if low > 0:
        if health == 100:
            health = 95
        health = health - low
    if health < 5:
        health = 5
    return health
"""
        # There is a single System_Settings row (pk=1) holding the grade code.
        system_settings = System_Settings.objects.get(id=1)
        system_settings.product_grade = code
        system_settings.save()
|
gg0/libming-debian | test/Text/test02.py | Python | lgpl-2.1 | 451 | 0.035477 | #!/usr/bin/python
from ming import *
import sys

# First CLI argument is the test directory; media files live next to it.
# Fixed: stray " | " dataset separators had garbled the SWFMovie and the
# second SWFFont assignments.
mediadir = sys.argv[1] + '/../Media'

m = SWFMovie()

# First run of text: embedded FDB font, 20pt, opaque black.
font = SWFFont(mediadir + "/font01.fdb")
text = SWFText(2)
text.setFont(font)
text.setHeight(20)
text.setColor(0x00, 0x00, 0x00, 0xff)
text.addString("abc")

# Second run appended to the same text object: TTF font, 40pt, opaque red.
font2 = SWFFont(mediadir + "/test.ttf")
text.setFont(font2)
text.setHeight(40)
text.setColor(0xff, 0x00, 0x00, 0xff)
text.addString("def")

m.add(text)
m.save("test02.swf")
|
coxmediagroup/googleads-python-lib | examples/dfp/v201411/label_service/create_labels.py | Python | apache-2.0 | 1,668 | 0.008393 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new labels.
To determine which labels exist, run get_all_labels.py. This feature is only
available to DFP premium solution networks."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Create five uniquely-named labels via the DFP LabelService and print
  the results.

  Fixed: stray " | " dataset separators had garbled the ``def`` keyword
  and the ``GetService`` call.
  """
  # Initialize appropriate service.
  label_service = client.GetService('LabelService', version='v201411')

  # Create label objects; uuid4 keeps names unique across runs.
  labels = []
  for _ in xrange(5):
    label = {
        'name': 'Label #%s' % uuid.uuid4(),
        'isActive': 'true',
        'types': ['COMPETITIVE_EXCLUSION']
    }
    labels.append(label)

  # Add Labels.
  labels = label_service.createLabels(labels)

  # Display results.
  for label in labels:
    print ('Label with id \'%s\', name \'%s\', and types {%s} was found.'
           % (label['id'], label['name'], ','.join(label['types'])))

if __name__ == '__main__':
  # Initialize client object from stored credentials.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
|
agconti/KaggleAux | kaggleaux/__init__.py | Python | apache-2.0 | 188 | 0 | import dataframe
import modeling
import ploting
import scoring
import utils

# Public names exported by the kaggleaux package. Module names keep their
# original spellings (including 'ploting'). 'dataframe' is imported on the
# first line of this file. Fixed: stray " | " dataset separators had
# garbled the 'scoring' import and the __all__ list.
__all__ = ['dataframe',
           'modeling',
           'ploting',
           'scoring',
           'utils']
|
hamsterbacke23/wagtail | wagtail/contrib/modeladmin/views.py | Python | bsd-3-clause | 35,740 | 0.000084 | from __future__ import absolute_import, unicode_literals
import operator
import sys
from collections import OrderedDict
from functools import reduce
from django import forms
from django.contrib.admin import FieldListFilter, widgets
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote, unquote)
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, SuspiciousOperation
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.constants import QUERY_TERMS
from django.shortcuts import get_object_or_404, redirect, render
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtaildocs.models import get_document_model
from wagtail.wagtailimages.models import Filter, get_image_model
from .forms import ParentChooserForm
class WMABaseView(TemplateView):
    """
    Groups together common functionality for all app views.

    Fixed: a stray " | " dataset separator had garbled the
    ``get_queryset`` call in get_base_queryset().
    """
    model_admin = None
    meta_title = ''
    page_title = ''
    page_subtitle = ''

    def __init__(self, model_admin):
        # Cache frequently-used model metadata up front.
        self.model_admin = model_admin
        self.model = model_admin.model
        self.opts = self.model._meta
        self.app_label = force_text(self.opts.app_label)
        self.model_name = force_text(self.opts.model_name)
        self.verbose_name = force_text(self.opts.verbose_name)
        self.verbose_name_plural = force_text(self.opts.verbose_name_plural)
        self.pk_attname = self.opts.pk.attname
        self.is_pagemodel = model_admin.is_pagemodel
        self.permission_helper = model_admin.permission_helper
        self.url_helper = model_admin.url_helper

    def check_action_permitted(self, user):
        # Subclasses override this to gate access; the base view permits all.
        return True

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        if not self.check_action_permitted(request.user):
            raise PermissionDenied
        button_helper_class = self.model_admin.get_button_helper_class()
        self.button_helper = button_helper_class(self, request)
        return super(WMABaseView, self).dispatch(request, *args, **kwargs)

    @cached_property
    def menu_icon(self):
        return self.model_admin.get_menu_icon()

    @cached_property
    def header_icon(self):
        # By default the page header reuses the menu icon.
        return self.menu_icon

    def get_page_title(self):
        return self.page_title or capfirst(self.opts.verbose_name_plural)

    def get_meta_title(self):
        return self.meta_title or self.get_page_title()

    @cached_property
    def index_url(self):
        return self.url_helper.index_url

    @cached_property
    def create_url(self):
        return self.url_helper.create_url

    def get_base_queryset(self, request=None):
        return self.model_admin.get_queryset(request or self.request)
class ModelFormView(WMABaseView, FormView):
    """
    Base view for form-backed (create/edit) model admin views.

    Fixed: a stray "| " dataset separator had garbled the
    ``edit_handler = ObjectList(panels)`` line in
    get_edit_handler_class().
    """

    def get_edit_handler_class(self):
        # Prefer an edit_handler declared on the model; otherwise derive
        # panel definitions from the model's fields.
        if hasattr(self.model, 'edit_handler'):
            edit_handler = self.model.edit_handler
        else:
            panels = extract_panel_definitions_from_model_class(self.model)
            edit_handler = ObjectList(panels)
        return edit_handler.bind_to_model(self.model)

    def get_form_class(self):
        return self.get_edit_handler_class().get_form_class(self.model)

    def get_success_url(self):
        return self.index_url

    def get_instance(self):
        # Existing instance if one has been set, otherwise a new unsaved one.
        return getattr(self, 'instance', None) or self.model()

    def get_form_kwargs(self):
        kwargs = FormView.get_form_kwargs(self)
        kwargs.update({'instance': self.get_instance()})
        return kwargs

    @property
    def media(self):
        return forms.Media(
            css={'all': self.model_admin.get_form_view_extra_css()},
            js=self.model_admin.get_form_view_extra_js()
        )

    def get_context_data(self, **kwargs):
        context = super(ModelFormView, self).get_context_data(**kwargs)
        instance = self.get_instance()
        edit_handler_class = self.get_edit_handler_class()
        form = self.get_form()
        context.update({
            'view': self,
            'model_admin': self.model_admin,
            'is_multipart': form.is_multipart(),
            'edit_handler': edit_handler_class(instance=instance, form=form),
            'form': form,
        })
        return context

    def get_success_message(self, instance):
        return _("{model_name} '{instance}' created.").format(
            model_name=capfirst(self.opts.verbose_name), instance=instance)

    def get_success_message_buttons(self, instance):
        button_url = self.url_helper.get_action_url('edit', quote(instance.pk))
        return [
            messages.button(button_url, _('Edit'))
        ]

    def get_error_message(self):
        model_name = self.verbose_name
        return _("The %s could not be created due to errors.") % model_name

    def form_valid(self, form):
        instance = form.save()
        messages.success(
            self.request, self.get_success_message(instance),
            buttons=self.get_success_message_buttons(instance)
        )
        return redirect(self.get_success_url())

    def form_invalid(self, form):
        messages.error(self.request, self.get_error_message())
        return self.render_to_response(self.get_context_data())
class InstanceSpecificView(WMABaseView):
    # Base view for pages that operate on a single model instance,
    # identified by a URL-quoted primary key.
    instance_pk = None
    pk_quoted = None
    instance = None

    def __init__(self, model_admin, instance_pk):
        super(InstanceSpecificView, self).__init__(model_admin)
        # The incoming pk is URL-quoted; unquote it for DB lookups and
        # re-quote it for building further URLs.
        self.instance_pk = unquote(instance_pk)
        self.pk_quoted = quote(self.instance_pk)
        filter_kwargs = {}
        filter_kwargs[self.pk_attname] = self.instance_pk
        object_qs = model_admin.model._default_manager.get_queryset().filter(
            **filter_kwargs)
        # 404s if no matching instance exists.
        self.instance = get_object_or_404(object_qs)

    def get_page_subtitle(self):
        # Subtitle the page with the instance's string representation.
        return self.instance

    @cached_property
    def edit_url(self):
        return self.url_helper.get_action_url('edit', self.pk_quoted)

    @cached_property
    def delete_url(self):
        return self.url_helper.get_action_url('delete', self.pk_quoted)
class IndexView(WMABaseView):
# IndexView settings
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# Only continue if logged in user has list permission
if not self.permission_helper.user_can_list(request.user):
raise PermissionDenied
self.list_display = self.model_admin.get_list_display(request)
self.list_filter = self.model_admin.get_list_filter(request)
self.search_fields = self.model_admin.get_search_fields(request)
self.items_per_page = self.model_admin.list_per_page
self.select_related = self.model_admin.list_select_related
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(self.PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.params = |
Castronova/EMIT | api_old/ODM2/LabAnalyses/services/createLabAnalyses.py | Python | gpl-2.0 | 223 | 0.022422 | __au | thor__ = 'Stephanie'
import sys
import os
from ... import serv | iceBase
from ..model import *
# from ODMconnection import SessionFactory
class createLabAnalyses (serviceBase):
    # Stub service class; 'test' is a placeholder that does nothing yet.
    def test(self):
        return None
dchucks/coursera-python-course | capstone/accidents-analysis/AccidentsJsonImporter.py | Python | gpl-3.0 | 2,735 | 0.004022 | import json
import sqlite3
import os
from os import listdir
conn = sqlite3.connect('accidentsdb.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DRO | P TABLE IF EXISTS AccidentsData;
CREATE TABLE AccidentsData (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
State TEXT,
Year INTEGER,
Accidents INTEGER,
Education TEXT
);
''')
filedir = os.path.join(os.path.dirname(__file__), "dataset//accidents-vs-e | ducation//2009-2015")
filelist = listdir(filedir)
print("Found following files in specified folder ", filedir, ":", filelist)
totrows = 0
for fname in filelist:
print("Now inserting data from:", fname)
if len(fname) > 1:
education = fname.split(".")[0]
# print("education", education)
str_data = open(filedir + "/" + fname).read()
json_data = json.loads(str_data)
# Get the listed Years from Fields and add them to our array
yearlist = []
for field in json_data['fields']:
try:
yearlist.append(int(field['label']))
except:
# print("error:", field['label'])
print("")
# Parse the accident data
for accidata in json_data['data']:
# print(accidata)
# State | Year | Accidents | Education
state = accidata[0]
# print("State:",state)
cnt = 0
for data in accidata:
if cnt > 0 and data != "NA" and data != "NR":
accidents = int(data)
year = yearlist[cnt - 1]
# We will not consider data where education is unknown or accidents were zero
if accidents > 0 and education != "Unknown":
cur.execute(
'''INSERT OR IGNORE INTO AccidentsData (State, Year, Accidents,
Education) VALUES ( ?, ?, ?, ? )''',
(state, year, accidents, education))
# print("[",cnt,"] Inserted:", state, year, accidents, education)
totrows = totrows + 1
cnt = cnt + 1
conn.commit()
print("Finished inserting", totrows, "rows of data from the json files.")
#Make data corrections
cur.execute("Update AccidentsData set State='Andaman & Nicobar Islands' where state = 'Andaman and Nicobar Islands'")
cur.execute("Update AccidentsData set State='Dadra & Nagar Haveli' where state='Dadra and Nagar Haveli'")
cur.execute("Update AccidentsData set State='Daman & Diu' where state='Daman and Diu'")
conn.commit()
print("Corrected names of 3 states")
|
Seanmcn/poker | tests/test_hand.py | Python | mit | 5,035 | 0.000993 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import pickle
import pytest
from poker import Hand, Combo, Rank
def test_first_and_second_are_instances_of_Rank():
    # Both card attributes of a Hand are Rank instances with correct values.
    assert isinstance(Hand('22').first, Rank)
    assert Hand('22').first == Rank('2')
    assert isinstance(Hand('22').second, Rank)
    assert Hand('22').second == Rank('2')
def test_representations():
    # str() yields the canonical two-card form; repr() is evaluable.
    assert str(Hand('22')) == '22'
    assert str(Hand('AKs')) == 'AKs'
    assert str(Hand('KJo')) == 'KJo'
    assert repr(Hand('22')) == "Hand('22')"
def test_ordering():
assert Hand('AKo') > Hand('AQs')
assert Hand('AKo') > Hand('KQo')
assert Hand('AKs') > Hand('54o')
assert Hand('AJo') > Hand('A2s')
assert Hand('AJo') > Hand('A2o')
assert Hand('KQo') > Hand('KJo')
assert Hand('76s') > Hand('75s')
assert Hand('76o') > Hand('75s')
assert Hand('33') > Hand('22')
assert Hand('22') > Hand('JTo')
assert Hand('22') > Hand('A2o')
assert Hand('22') > Hand('A3s')
assert Hand('JJ') > Hand('JTs')
assert Hand('JJ') > Hand('AJs')
assert Hand('76s') > Hand('76o')
def test_ordering_is_unambiguous():
assert Hand('76o') < Hand('J6o')
assert Hand('J6o') > Hand('76o')
assert (Hand('J6o') < Hand('76o')) is False
def test_ordering_reverse():
assert Hand('AQs') < Hand('AKo')
assert Hand('KQo') < Hand('AKo')
assert Hand('54o') < Hand('AKs')
assert Hand('A2s') < Hand('AJo')
assert Hand('A2o') < Hand('AJo')
assert Hand('KJo') < Hand('KQo')
assert Hand('75s') < Hand('76s')
assert Hand('75s') < Hand('76o')
assert Hand('22') < Hand('33')
assert Hand('JTo') < Hand('22')
assert Hand('A2o') < Hand('22')
assert Hand('A3s') < Hand('22')
assert Hand('JTs') < Hand('JJ')
assert Hand('AJs') < Hand('JJ')
assert Hand('76o') < Hand('76s')
def test_only_same_suits_are_equal():
    """Suitedness is part of a hand's identity."""
    assert Hand('AKo') == Hand('AKo')
    assert Hand('AKo') != Hand('AKs')


def test_case_insensitive():
    """Hand notation is parsed case-insensitively."""
    assert Hand('AKo') == Hand('akO')
    assert Hand('jks') == Hand('JKS')


def test_equality():
    assert Hand('AKs') != Hand('44')
    assert Hand('22') != Hand('33')
    assert Hand('22') == Hand('22')


def test_is_suited():
    # Pairs cannot be suited.
    assert Hand('AKs').is_suited is True
    assert Hand('AKo').is_suited is False
    assert Hand('22').is_suited is False


def test_is_offsuit():
    # Pairs are neither suited nor offsuit.
    assert Hand('AKs').is_offsuit is False
    assert Hand('AKo').is_offsuit is True
    assert Hand('22').is_offsuit is False


def test_is_connector():
    """Connectors are hands whose ranks are adjacent; pairs are not."""
    assert Hand('76o').is_connector is True
    assert Hand('AKo').is_connector is True
    assert Hand('22').is_connector is False
    assert Hand('85o').is_connector is False


def test_is_one_gapper():
    """One rank between the two cards (e.g. 8-6, A-Q)."""
    assert Hand('86s').is_one_gapper is True
    assert Hand('AQo').is_one_gapper is True


def test_is_two_gapper():
    """Two ranks between the two cards (e.g. 8-5, A-J)."""
    assert Hand('85s').is_two_gapper is True
    assert Hand('AJo').is_two_gapper is True
    assert Hand('86s').is_two_gapper is False
    assert Hand('ATo').is_two_gapper is False


def test_is_suited_connector():
    assert Hand('76s').is_suited_connector is True
    assert Hand('45s').is_suited_connector is True
    assert Hand('55').is_suited_connector is False
    assert Hand('76o').is_suited_connector is False


def test_is_broadway():
    """Broadway hands have both cards T or higher; per the cases below,
    pairs do not count."""
    assert Hand('AKo').is_broadway is True
    assert Hand('J9o').is_broadway is False
    assert Hand('99').is_broadway is False


def test_is_pair():
    assert Hand('22').is_pair is True
    assert Hand('86s').is_pair is False
def test_invalid_suit_raises_ValueError():
    """Only 's' (suited) and 'o' (offsuit) are valid suit markers."""
    with pytest.raises(ValueError):
        Hand('32l')


def test_invalid_rank_raises_ValueError():
    with pytest.raises(ValueError):
        Hand('AMs')


def test_pair_with_suit_raises_ValueError():
    # A pair cannot be suited, so a suit marker on a pair is invalid.
    with pytest.raises(ValueError):
        Hand('22s')


def test_hand_without_suit_raises_ValueError():
    # Non-pair hands must specify suitedness.
    with pytest.raises(ValueError):
        Hand('AK')


def test_make_random():
    """make_random returns a well-formed Hand with Rank components."""
    hand = Hand.make_random()
    assert isinstance(hand, Hand)
    assert isinstance(hand.first, Rank)
    assert isinstance(hand.second, Rank)


def test_hash():
    """Equal hands hash equally (required for set/dict membership)."""
    hand1 = Hand('22')
    hand2 = Hand('22')
    assert hash(hand1) == hash(hand2)


def test_putting_them_in_set_doesnt_raise_Exception():
    {Hand('22'), Hand('AKo')}


def test_pair_hand_to_combos():
    # A pair expands to the 6 distinct two-card combinations.
    assert Hand('22').to_combos() == (
        Combo('2c2d'), Combo('2c2h'), Combo('2c2s'), Combo('2d2h'), Combo('2d2s'), Combo('2h2s')
    )


def test_offsuit_hand_to_combos():
    # An offsuit hand expands to the 12 mixed-suit combinations.
    assert Hand('76o').to_combos() == (
        Combo('7c6d'), Combo('7c6h'), Combo('7c6s'), Combo('7d6c'), Combo('7d6h'), Combo('7d6s'),
        Combo('7h6c'), Combo('7h6d'), Combo('7h6s'), Combo('7s6c'), Combo('7s6d'), Combo('7s6h')
    )


def test_suited_hand_to_combos():
    # A suited hand expands to the 4 same-suit combinations.
    assert Hand('76s').to_combos() == (Combo('7c6c'), Combo('7d6d'), Combo('7h6h'), Combo('7s6s'))


def test_pickable():
    """Hands survive a pickle round trip."""
    assert pickle.loads(pickle.dumps(Hand('Ako'))) == Hand('AKo')
|
wsqhubapp/learning_log | learning_logs/migrations/0002_entry.py | Python | apache-2.0 | 869 | 0.002301 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-06 16:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Entry model: the dated text body attached to a Topic."""

    dependencies = [
        # Entry.topic references Topic, created by the initial migration.
        ('learning_logs', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                # Timestamp set once, when the row is first created.
                ('date_added', models.DateTimeField(auto_now_add=True)),
                # Deleting a Topic cascades to its entries.
                ('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='learning_logs.Topic')),
            ],
            options={
                'verbose_name_plural': 'entries',
            },
        ),
    ]
|
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/tools/special.py | Python | mit | 24,211 | 0.003552 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.tools.special Special functions.
# -----------------------------------------------------------------
# Ensure Python 3 | compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
# Import the relevant PTS classes and modules
from ...magic.core.frame import Frame
from ..basics.remote import Remote, connected_remotes
from . import time
from . import filesystem as fs
from .logging import log
# -----------------------------------------------------------------
def remote_convolution(image, kernel, host_id):
"""
This function | ...
:param image:
:param kernel:
:param host_id:
"""
# Check whether we are already connected to the specified remote host
if host_id in connected_remotes and connected_remotes[host_id] is not None:
remote = connected_remotes[host_id]
else:
# Debugging
log.debug("Logging in to remote host ...")
# Create a remote instance for the specified host ID
remote = Remote()
remote.setup(host_id)
# Debugging
log.debug("Creating temporary directory remotely ...")
# Create a temporary directory to do the convolution
remote_home_directory = remote.home_directory
remote_temp_path = fs.join(remote_home_directory, time.unique_name("convolution"))
remote.create_directory(remote_temp_path)
# Debugging
#log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel FITS file to the remote directory
#remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
#remote.upload(kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a local temporary directory ...")
# Create a temporary directory locally to contain the frames
local_temp_path = fs.join(fs.home(), time.unique_name("convolution"))
fs.create_directory(local_temp_path)
# Debugging
log.debug("Saving the image frames to the temporary directory ...")
# Save the frames
local_frame_paths = []
constant_frames = []
for frame_name in image.frames:
frame_path = fs.join(local_temp_path, frame_name + ".fits")
# Only upload and convolve non-constant frames
if not image.frames[frame_name].is_constant():
image.frames[frame_name].save(frame_path)
local_frame_paths.append(frame_path)
else:
log.debug("The " + frame_name + " frame is constant, so this won't be uploaded and convolved")
constant_frames.append(frame_name)
# Debugging
log.debug("Saving the kernel to the temporary directory ...")
local_kernel_path = fs.join(local_temp_path, "kernel.fits")
kernel.save(local_kernel_path)
# Debugging
log.debug("Uploading the image frames to the remote directory ...")
# Upload the frames
remote_frame_paths = []
for local_frame_path in local_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(local_frame_path)
# Debugging
log.debug("Uploading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Upload the frame file
remote_frame_path = fs.join(remote_temp_path, frame_file_name)
remote.upload(local_frame_path, remote_temp_path, new_name=frame_file_name, compress=True, show_output=True)
remote_frame_paths.append(remote_frame_path)
# Debugging
log.debug("Uploading the kernel to the remote directory ...")
# Upload the kernel
remote_kernel_path = fs.join(remote_temp_path, "kernel.fits")
remote.upload(local_kernel_path, remote_temp_path, new_name="kernel.fits", compress=True, show_output=True)
# Debugging
log.debug("Creating a python script to perform the convolution remotely ...")
# Create a python script that does the convolution
#script_file = tempfile.NamedTemporaryFile()
#local_script_path = script_file.name
local_script_path = fs.join(local_temp_path, "convolve.py")
script_file = open(local_script_path, 'w')
script_file.write("#!/usr/bin/env python\n")
script_file.write("# -*- coding: utf8 -*-\n")
script_file.write("\n")
script_file.write("# Import astronomical modules\n")
script_file.write("from astropy.units import Unit\n")
script_file.write("\n")
script_file.write("# Import the relevant PTS classes and modules\n")
script_file.write("from pts.magic.core.frame import Frame\n")
script_file.write("from pts.magic.core.image import Image\n")
script_file.write("from pts.magic.core.kernel import ConvolutionKernel\n")
script_file.write("from pts.core.tools.logging import log\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the kernel frame ...')\n")
script_file.write("\n")
script_file.write("# Open the kernel\n")
script_file.write("kernel = ConvolutionKernel.from_file('" + remote_kernel_path + "')\n")
script_file.write("\n")
for remote_frame_path in remote_frame_paths:
frame_name = fs.strip_extension(fs.name(remote_frame_path))
script_file.write("# Inform the user\n")
script_file.write("log.info('Opening the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Open the frame\n")
script_file.write("frame = Frame.from_file('" + remote_frame_path + "')\n")
script_file.write("\n")
script_file.write("# Inform the user\n")
script_file.write("log.info('Convolving the " + frame_name + " frame ...')\n")
script_file.write("\n")
script_file.write("# Do the convolution and save the result\n")
script_file.write("frame.convolve(kernel, allow_huge=True)\n")
script_file.write("frame.save('" + remote_frame_path + "')\n") # overwrite the frame
script_file.write("\n")
#script_file.write("# Save the image\n")
#script_file.write("image.save(" + remote_image_path + ")\n")
# Write to disk
#script_file.flush()
script_file.close()
# Debugging
log.debug("Uploading the python script ...")
# Upload the script file
remote_script_path = fs.join(remote_temp_path, "convolve.py")
remote.upload(local_script_path, remote_temp_path, new_name="convolve.py", show_output=True)
# Close the local script (it is automatically removed)
#script_file.close()
# Debugging
log.debug("Executing the script remotely ...")
# Execute the script file remotely
remote.execute("python " + remote_script_path, output=False, show_output=True)
# Debugging
log.debug("Downloading the results ...")
# Download the resulting FITS file (the convolved image)
#local_result_path = self.full_output_path("convolved.fits")
#remote.download(remote_image_path, fs.directory_of(local_result_path), new_name="convolved.fits", compress=True)
for remote_frame_path in remote_frame_paths:
# Determine the name of the local frame file
frame_file_name = fs.name(remote_frame_path)
# Debugging
log.debug("Downloading the " + fs.strip_extension(frame_file_name) + " frame ...")
# Download
remote.download(remote_frame_path, local_temp_path, new_name=frame_file_name, compress=True, show_output=True)
# Remove the temporary directory on the remote's filesystem
remote.remove_directory(remote_temp_path)
# Load the result
#self.image = Image.from_file(local_result_path)
for frame_name in image.frames.keys():
if frame_name in constant_frames: continue # Skip constant frames, these are not convolved
local_frame_path = fs.join(local_temp_path, frame_name + ".fits")
image.frames[frame_name] = |
vensder/itmo_python | my_circle_class.py | Python | gpl-3.0 | 458 | 0.050218 | #my_circle_class.py
class Circle:
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def are | a(self):
return 3.14 * self.get_radius() * self.get_radius()
@property
def r(self):
return self._r
@r.setter
def r(self, radius):
if r < 0:
raise ValueError('Radius sho | uld be positive')
self._r = r
circle = Circle(0, 0, 1)
print(circle.area())
circle.set_radius(3)
print(circle.area())
circle.r = -10
print(circle.r)
|
kevinlee12/oppia | core/tests/build_sources/extensions/base.py | Python | apache-2.0 | 10,783 | 0.000185 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining interactions.
A note on terminology: state_customization_args refers to the values of
customization args that are provided by an exploration editor. They are
formatted as
{ca_name: {value: ca_value}}
On the other hand, interaction.customization_args refers to a combination of
the interaction customization arg spec and the value used. It is a list of
dicts, each representing a customization arg -- viz.:
[{
'name': ca_name,
'value': ca_value,
'default_value': ...,
...
}]
"""
from __future__ import annotations
import copy
import json
import os
import sys
from core import feconf
from core import python_utils
from core import utils
from core.domain import object_registry
from core.domain import visualization_registry
from extensions import domain
from extensions.objects.models import objects
# Indicates that the learner view of the interaction should be displayed in the
# context of the conversation.
DISPLAY_MODE_INLINE = 'inline'
# Indicates that the learner view of the interaction should be displayed as a
# separate object from the conversation.
DISPLAY_MODE_SUPPLEMENTAL = 'supplemental'
ALLOWED_DISPLAY_MODES = [DISPLAY_MODE_SUPPLEMENTAL, DISPLAY_MODE_INLINE]
class BaseInteraction:
"""Base interaction definition class.
This class is not meant to be user-editable. The only methods on it should
be get()-type methods.
Note that all interactions should also include a thumbnail image of size
178 x 146 pixels. This image will be shown in the interaction selector.
"""
# The human-readable name of the interaction. Overridden in subclasses.
name = ''
# A description of the interaction. Overridden in subclasses.
description = ''
# Describes how the interaction | should be displayed -- either within the
# conversation ('inline'), or as a separate object ('supplemental'). In the
# latter case, the interaction instance is reused if two adjacent states
# have the same interaction id.
display_mode = ''
# Whether this interaction should be consid | ered terminal, i.e. it ends
# the exploration. Defaults to False.
is_terminal = False
# Whether the interaction has only one possible answer.
is_linear = False
# Whether this interaction supports machine learning classification.
# TODO(chiangs): remove once classifier_services is generalized.
is_trainable = False
# Additional JS library dependencies that should be loaded in pages
# containing this interaction. These should correspond to names of files in
# feconf.DEPENDENCIES_TEMPLATES_DIR. Overridden in subclasses.
_dependency_ids = []
# The type of answer (as a string) accepted by this interaction, e.g.
# 'CodeEvaluation'. This should be None for linear and terminal
# interactions.
answer_type = None
# Customization arg specifications for the component, including their
# descriptions, schemas and default values. Overridden in subclasses.
_customization_arg_specs = []
# Specs for desired visualizations of recorded state answers. Overridden
# in subclasses.
_answer_visualization_specs = []
# Instructions for using this interaction, to be shown to the learner. Only
# relevant for supplemental interactions.
instructions = None
# Instructions for using this interaction, to be shown to the learner. Only
# shows up when view port is narrow. Only relevent for supplemental
# interactions.
narrow_instructions = None
# Whether the answer is long, and would benefit from being summarized.
needs_summary = False
# The heading for the 'default outcome' section in the editor. This should
# be None unless the interaction is linear and non-terminal.
default_outcome_heading = None
# Whether the solution feature supports this interaction.
can_have_solution = None
# Whether to show a Submit button in the progress navigation area. This is
# a generic submit button so do not use this if special interaction-specific
# behavior is required. The interaction JS must also handle the
# EVENT_PROGRESS_NAV_SUBMITTED event broadcast by this Submit button.
show_generic_submit_button = False
# Temporary cache for the rule definitions.
_cached_rules_dict = None
@property
def id(self):
return self.__class__.__name__
@property
def customization_arg_specs(self):
return [
domain.CustomizationArgSpec(**cas)
for cas in self._customization_arg_specs]
@property
def answer_visualization_specs(self):
return self._answer_visualization_specs
@property
def answer_visualizations(self):
result = []
for spec in self._answer_visualization_specs:
factory_cls = (
visualization_registry.Registry.get_visualization_class(
spec['id']))
result.append(
factory_cls(
spec['calculation_id'], spec['options'],
spec['addressed_info_is_supported']))
return result
@property
def answer_calculation_ids(self):
visualizations = self.answer_visualizations
return set(
[visualization.calculation_id for visualization in visualizations])
@property
def dependency_ids(self):
return copy.deepcopy(self._dependency_ids)
def normalize_answer(self, answer):
"""Normalizes a learner's input to this interaction."""
if self.answer_type is None:
return None
else:
return object_registry.Registry.get_object_class_by_type(
self.answer_type).normalize(answer)
@property
def rules_dict(self):
"""A dict of rule names to rule properties."""
if self._cached_rules_dict is not None:
return self._cached_rules_dict
rules_index_dict = json.loads(
python_utils.get_package_file_contents(
'extensions', feconf.RULES_DESCRIPTIONS_EXTENSIONS_MODULE_PATH))
self._cached_rules_dict = rules_index_dict[self.id]
return self._cached_rules_dict
@property
def _rule_description_strings(self):
return {
rule_name: self.rules_dict[rule_name]['description']
for rule_name in self.rules_dict
}
@property
def html_body(self):
"""The HTML code containing directives and templates for the
interaction. This contains everything needed to display the interaction
once the necessary attributes are supplied.
Each interaction has two directive/template pairs, one for the
interaction itself and the other for displaying the learner's response
in a read-only view after it has been submitted.
"""
html_templates = utils.get_file_contents(os.path.join(
feconf.INTERACTIONS_DIR, self.id, '%s.html' % self.id))
return html_templates
@property
def validator_html(self):
"""The HTML code containing validators for the interaction's
customization_args and submission handler.
"""
return (
'<script>%s</script>\n' %
utils.get_file_contents(os.path.join(
feconf.INTERACTIONS_DIR,
self.id,
'%sValidationService.js' % self.id)))
def to_dict(self):
"""Gets a dict representing this interaction. Only default values are
provided.
"" |
suyashphadtare/vestasi-erp-1 | erpnext/erpnext/accounts/report/gross_profit/gross_profit.py | Python | agpl-3.0 | 4,692 | 0.032396 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from erpnext.stock.utils import get_buying_amount, get_sales_bom_buying_amount
def execute(filters=None):
	"""Report entry point: build the Gross Profit rows.

	Returns (columns, data) where each row pairs the selling amount of a
	Delivery Note / Sales Invoice item with its valuation (buying) amount,
	and derives gross profit and gross profit %.
	"""
	if not filters: filters = {}
	stock_ledger_entries = get_stock_ledger_entries(filters)
	source = get_source_data(filters)
	item_sales_bom = get_item_sales_bom()
	columns = ["Delivery Note/Sales Invoice::120", "Link::30", "Posting Date:Date", "Posting Time",
		"Item Code:Link/Item", "Item Name", "Description", "Warehouse:Link/Warehouse",
		"Qty:Float", "Selling Rate:Currency", "Avg. Buying Rate:Currency",
		"Selling Amount:Currency", "Buying Amount:Currency",
		"Gross Profit:Currency", "Gross Profit %:Percent", "Project:Link/Project"]
	data = []
	for row in source:
		selling_amount = flt(row.base_amount)
		# Packed ("sales BOM") items are valued from their child items.
		item_sales_bom_map = item_sales_bom.get(row.parenttype, {}).get(row.name, frappe._dict())
		if item_sales_bom_map.get(row.item_code):
			buying_amount = get_sales_bom_buying_amount(row.item_code, row.warehouse,
				row.parenttype, row.name, row.item_row, stock_ledger_entries, item_sales_bom_map)
		else:
			buying_amount = get_buying_amount(row.parenttype, row.name, row.item_row,
				stock_ledger_entries.get((row.item_code, row.warehouse), []))
		# Clamp negative valuations to zero (old 'and/or' ternary idiom).
		buying_amount = buying_amount > 0 and buying_amount or 0
		gross_profit = selling_amount - buying_amount
		if selling_amount:
			gross_profit_percent = (gross_profit / selling_amount) * 100.0
		else:
			gross_profit_percent = 0.0
		# Clickable link to the source voucher form.
		icon = """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
			% ("/".join(["#Form", row.parenttype, row.name]),)
		data.append([row.name, icon, row.posting_date, row.posting_time, row.item_code, row.item_name,
			row.description, row.warehouse, row.qty, row.base_rate,
			row.qty and (buying_amount / row.qty) or 0, row.base_amount, buying_amount,
			gross_profit, gross_profit_percent, row.project])
	return columns, data
def get_stock_ledger_entries(filters):
	"""Fetch stock ledger entries (optionally restricted to a company),
	newest first, grouped into a dict keyed by (item_code, warehouse)."""
	query = """select item_code, voucher_type, voucher_no,
		voucher_detail_no, posting_date, posting_time, stock_value,
		warehouse, actual_qty as qty
		from `tabStock Ledger Entry`"""
	if filters.get("company"):
		query += """ where company=%(company)s"""
	query += " order by item_code desc, warehouse desc, posting_date desc, posting_time desc, name desc"
	entries = frappe.db.sql(query, filters, as_dict=True)
	grouped = {}
	for entry in entries:
		grouped.setdefault((entry.item_code, entry.warehouse), []).append(entry)
	return grouped
def get_item_sales_bom():
	"""Map submitted Packed Item rows as:
	parenttype -> parent voucher -> parent_item -> [child item rows].

	qty is negated so the child quantities read as outgoing stock.
	"""
	item_sales_bom = {}
	for d in frappe.db.sql("""select parenttype, parent, parent_item,
		item_code, warehouse, -1*qty as total_qty, parent_detail_docname
		from `tabPacked Item` where docstatus=1""", as_dict=True):
		item_sales_bom.setdefault(d.parenttype, frappe._dict()).setdefault(d.parent,
			frappe._dict()).setdefault(d.parent_item, []).append(d)
	return item_sales_bom
def get_source_data(filters):
	"""Collect item rows from submitted Delivery Notes and from submitted
	Sales Invoices that update stock, newest first.

	Supported filters: company, from_date, to_date.
	"""
	conditions = ""
	if filters.get("company"):
		conditions += " and company=%(company)s"
	if filters.get("from_date"):
		conditions += " and posting_date>=%(from_date)s"
	if filters.get("to_date"):
		conditions += " and posting_date<=%(to_date)s"
	delivery_note_items = frappe.db.sql("""select item.parenttype, dn.name,
		dn.posting_date, dn.posting_time, dn.project_name,
		item.item_code, item.item_name, item.description, item.warehouse,
		item.qty, item.base_rate, item.base_amount, item.name as "item_row",
		timestamp(dn.posting_date, dn.posting_time) as posting_datetime
		from `tabDelivery Note` dn, `tabDelivery Note Item` item
		where item.parent = dn.name and dn.docstatus = 1 %s
		order by dn.posting_date desc, dn.posting_time desc""" % (conditions,), filters, as_dict=1)
	sales_invoice_items = frappe.db.sql("""select item.parenttype, si.name,
		si.posting_date, si.posting_time, si.project_name,
		item.item_code, item.item_name, item.description, item.warehouse,
		item.qty, item.base_rate, item.base_amount, item.name as "item_row",
		timestamp(si.posting_date, si.posting_time) as posting_datetime
		from `tabSales Invoice` si, `tabSales Invoice Item` item
		where item.parent = si.name and si.docstatus = 1 %s
		and si.update_stock = 1
		order by si.posting_date desc, si.posting_time desc""" % (conditions,), filters, as_dict=1)
	source = delivery_note_items + sales_invoice_items
	# Re-sort the merged list only if invoices were actually appended;
	# otherwise the delivery-note ordering is already correct.
	if len(source) > len(delivery_note_items):
		source.sort(key=lambda d: d.posting_datetime, reverse=True)
	return source
siddharth96/windows-tweaker | WindowsTweaker/Search/stemmer.py | Python | gpl-3.0 | 3,563 | 0.001403 | #!/usr/bin/env python
"""
Script Usage:
Write to a file with indentation:
python %(scriptName)s --input <input-file> --output englishTermAndUiElementMap.json --indent=4
Write to a file without any indentation:
python %(scriptName)s --input <input-file> --output englishTermAndUiElementMap.json
Write to stdout with indentation:
python %(scriptName)s --input <input-file> --indent=4
Write to stdout without any indentation:
p | ython %(scriptName)s --input <input-file-path>
"""
import argparse
import json
import Stemmer
from collections import defaultdict
from stemming import porter2
EN = "en"
DE = "de"
RU = "ru"
FR = "fr"
UI_ELEMENT_NAME = 0
MAIN_NAV_ITEM | = 1
SUB_TAB_CONTROL = 2
SUB_TAB = 3
def stemmer_meth(lang):
    """Return a word-stemming callable for *lang* (Python 2 module).

    English uses the pure-python porter2 stemmer; German, Russian and
    French use PyStemmer's Snowball stemmers.  Any other language gets
    the identity function, i.e. terms are indexed verbatim.
    """
    if lang == EN:
        print 'Returning eng stem'
        return porter2.stem
    if lang == DE:
        print 'Returning german stem'
        return Stemmer.Stemmer('german').stemWord
    if lang == RU:
        print 'Returning russian stem'
        return Stemmer.Stemmer('russian').stemWord
    if lang == FR:
        print 'Returning french stem'
        return Stemmer.Stemmer('french').stemWord
    # Unknown language: no stemming.
    return lambda x: x
def stem_file(file_path, lang, output_file_path, indent, separator="=>"):
    """Build a stemmed-term -> UI-element index from *file_path*.

    Each useful input line has the form
        <free text> => <UiElement>,<MainNavItem>,<SubTabControl>,<SubTab>
    The free text is tokenised, short tokens (< 3 chars) dropped, the rest
    stemmed for *lang*, and each stem mapped to the 4-field UI descriptor.
    The resulting dict is written as JSON to *output_file_path*, or printed
    to stdout when no output path is given.
    """
    term_and_ui_element_map = defaultdict(list)
    # Turn the 4-element CSV row into the JSON record for one term.
    ui_element_map_for_term = lambda ui_element_row: \
        {"UiElement": ui_element_row[UI_ELEMENT_NAME].strip(),
         "MainNavItem": ui_element_row[MAIN_NAV_ITEM].strip(),
         "SubTabControl": ui_element_row[SUB_TAB_CONTROL].strip(),
         "SubTab": ui_element_row[SUB_TAB].strip()}
    stemmer_func = stemmer_meth(lang)
    with open(file_path, 'r') as input_file:
        for line in input_file:
            if not line or separator not in line:
                continue
            row = line.splitlines()[0].strip()
            row = row.split(separator)
            line_to_stem = row[0]
            # NOTE(review): the isalnum() test only gates English terms --
            # presumably to let accented/Cyrillic tokens through for other
            # languages; confirm intent.
            stemmed_line = [stemmer_func(_term.lower())
                            for _term in line_to_stem.split(' ')
                            if _term and len(_term) >= 3 and
                            (_term.isalnum() or lang != EN)]
            if not stemmed_line:
                continue
            for _term in stemmed_line:
                ui_element_lst = row[1].split(',')
                # Silently skip malformed rows that do not have 4 fields.
                if len(ui_element_lst) != 4:
                    continue
                term_and_ui_element_map[_term].append(
                    ui_element_map_for_term(ui_element_lst))
    if output_file_path:
        with open(output_file_path, 'w') as output_file:
            json.dump(term_and_ui_element_map, output_file,
                      indent=indent if indent else None)
    else:
        print json.dumps(term_and_ui_element_map,
                         indent=indent if indent else None)
def main():
    """CLI wrapper: parse arguments and run stem_file.

    --input is mandatory; --output defaults to stdout, --indent to compact
    JSON, --lang to English.
    """
    parser = argparse.ArgumentParser("Stemmer")
    parser.add_argument('--input', dest='input', default=None,
                        type=str, help='Input text file to be stemmed')
    parser.add_argument('--lang', dest='lang',
                        type=str, default=EN)
    parser.add_argument('--indent', dest='indent', type=int,
                        default=0)
    parser.add_argument('--output', dest='output', default=None,
                        type=str, help='Output file to write to')
    args = parser.parse_args()
    if not args.input:
        # Show usage examples from the module docstring before bailing out.
        print __doc__ % {'scriptName': __file__}
        parser.error("No input text file provided")
    stem_file(args.input, args.lang, args.output, args.indent)
|
tiankangkan/paper_plane | king_spider/music_web_spider/__init__.py | Python | gpl-3.0 | 269 | 0 | import os
import sys
def current_file_directory():
    """Return the absolute path of the directory holding this module."""
    resolved = os.path.realpath(__file__)
    return os.path.dirname(resolved)
# Project root: two directory levels above this package.
BASE_DIR = os.path.dirname(os.path.dirname(current_file_directory()))
# Make the project importable when this package is used stand-alone.
sys.path.append(BASE_DIR)
# Point Django at the project settings before any Django imports happen.
os.environ['DJANGO_SETTINGS_MODULE'] = 'paper_plane.settings'
|
pyphrb/myweb | app/plugin/nmap/libnmap/diff.py | Python | apache-2.0 | 2,896 | 0 | # -*- coding: utf-8 -*-
class DictDiffer(object):
    """
    Compute the delta between two dictionaries, exposed as four views:
    (1) keys only in the current dict (added)
    (2) keys only in the past dict (removed)
    (3) shared keys whose values differ (changed)
    (4) shared keys whose values are equal (unchanged)
    """
    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.set_current = set(current_dict)
        self.set_past = set(past_dict)
        self.intersect = self.set_current & self.set_past

    def added(self):
        """Keys present now but absent from the past dict."""
        return self.set_current - self.intersect

    def removed(self):
        """Keys present in the past dict but gone now."""
        return self.set_past - self.intersect

    def changed(self):
        """Shared keys whose associated values differ."""
        return set(key for key in self.intersect
                   if self.past_dict[key] != self.current_dict[key])

    def unchanged(self):
        """Shared keys whose associated values are identical."""
        return set(key for key in self.intersect
                   if self.past_dict[key] == self.current_dict[key])
class NmapDiff(DictDiffer):
    """
    Compare two libnmap objects of the same type and report what changed.

    Exposes the DictDiffer views over the objects' get_dict()
    representations:
        - NmapDiff.changed()
        - NmapDiff.added()
        - NmapDiff.removed()
        - NmapDiff.unchanged()
    Each returns a set of keys; see the get_dict() method of the objects
    being compared (libnmap.objects.NmapHost, NmapService, ...) for the
    possible keys.
    """
    def __init__(self, nmap_obj1, nmap_obj2):
        """Validate that the two objects are comparable (same class and
        same id), then hand their dict representations to DictDiffer.

        :raises NmapDiffException: when class or id do not match.
        """
        same_class = nmap_obj1.__class__ == nmap_obj2.__class__
        if not same_class or nmap_obj1.id != nmap_obj2.id:
            raise NmapDiffException("Comparing objects with non-matching id")
        self.object1 = nmap_obj1.get_dict()
        self.object2 = nmap_obj2.get_dict()
        DictDiffer.__init__(self, self.object1, self.object2)

    def __repr__(self):
        return ("added: [%s] -- changed: [%s] -- "
                "unchanged: [%s] -- removed [%s]" % (self.added(),
                                                     self.changed(),
                                                     self.unchanged(),
                                                     self.removed()))
class NmapDiffException(Exception):
    """Raised when two objects cannot be diffed (class or id mismatch).

    Fix: the original never called Exception.__init__, so str(exc) and the
    traceback message were empty; the message was only reachable via .msg.
    """
    def __init__(self, msg):
        super(NmapDiffException, self).__init__(msg)
        # Kept for backward compatibility with callers reading .msg
        self.msg = msg
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbmonbindings.py | Python | apache-2.0 | 6,759 | 0.035804 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options impo | rt options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_ | util import nitro_util
class lbmonbindings(base_resource) :
	""" Configuration for monitor bindings resource. """
	# NOTE: this class follows the auto-generated Citrix NITRO SDK pattern:
	# private attributes + property wrappers + classmethod fetch helpers.
	def __init__(self) :
		self._monitorname = ""
		self._type = ""
		self._state = ""
		# Triple underscore matches the "___count" attribute name the NITRO
		# payload formatter populates when a count request is made.
		self.___count = 0

	@property
	def monitorname(self) :
		ur"""The name of the monitor.<br/>Minimum length = 1.
		"""
		try :
			return self._monitorname
		except Exception as e:
			raise e

	@monitorname.setter
	def monitorname(self, monitorname) :
		ur"""The name of the monitor.<br/>Minimum length = 1
		"""
		try :
			self._monitorname = monitorname
		except Exception as e:
			raise e

	@property
	def type(self) :
		ur"""The type of monitor.<br/>Possible values = PING, TCP, HTTP, TCP-ECV, HTTP-ECV, UDP-ECV, DNS, FTP, LDNS-PING, LDNS-TCP, LDNS-DNS, RADIUS, USER, HTTP-INLINE, SIP-UDP, LOAD, FTP-EXTENDED, SMTP, SNMP, NNTP, MYSQL, MYSQL-ECV, MSSQL-ECV, ORACLE-ECV, LDAP, POP3, CITRIX-XML-SERVICE, CITRIX-WEB-INTERFACE, DNS-TCP, RTSP, ARP, CITRIX-AG, CITRIX-AAC-LOGINPAGE, CITRIX-AAC-LAS, CITRIX-XD-DDC, ND6, CITRIX-WI-EXTENDED, DIAMETER, RADIUS_ACCOUNTING, STOREFRONT, APPC, CITRIX-XNC-ECV, CITRIX-XDM.
		"""
		# Read-only: the monitor type is reported by the appliance.
		try :
			return self._type
		except Exception as e:
			raise e

	@property
	def state(self) :
		ur"""The state of the monitor.<br/>Possible values = ENABLED, DISABLED.
		"""
		# Read-only: the monitor state is reported by the appliance.
		try :
			return self._state
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(lbmonbindings_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session expired: clear it before
				# reporting the failure.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.lbmonbindings
		except Exception as e :
			raise e

	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			if self.monitorname is not None :
				return str(self.monitorname)
			return None
		except Exception as e :
			raise e

	@classmethod
	def get(cls, client, name="", option_="") :
		ur""" Use this API to fetch all the lbmonbindings resources that are configured on netscaler.
		"""
		# Accepts a single monitor name or a list of names; builds one resource
		# stub per name and fetches each from the appliance.
		# NOTE(review): when *name* is already a lbmonbindings instance
		# (type(name) == cls) this falls through and returns None -- matches
		# the generated-SDK pattern; confirm intended.
		try :
			if type(name) != cls :
				if type(name) is not list :
					obj = lbmonbindings()
					obj.monitorname = name
					response = obj.get_resource(client, option_)
				else :
					if name and len(name) > 0 :
						response = [lbmonbindings() for _ in range(len(name))]
						obj = [lbmonbindings() for _ in range(len(name))]
						for i in range(len(name)) :
							obj[i] = lbmonbindings()
							obj[i].monitorname = name[i]
							response[i] = obj[i].get_resource(client, option_)
				return response
		except Exception as e :
			raise e

	@classmethod
	def get_filtered(cls, client, filter_, obj) :
		ur""" Use this API to fetch filtered set of lbmonbindings resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			option_ = options()
			option_.filter = filter_
			option_.args = nitro_util.object_to_string_withoutquotes(obj)
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def count(cls, client, obj) :
		ur""" Use this API to count the lbmonbindings resources configured on NetScaler.
		"""
		try :
			option_ = options()
			option_.count = True
			option_.args = nitro_util.object_to_string_withoutquotes(obj)
			response = obj.get_resources(client, option_)
			# Count responses carry the total in the "___count" attribute of
			# the first (only) element.
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	@classmethod
	def count_filtered(cls, client, filter_, obj) :
		ur""" Use this API to count filtered the set of lbmonbindings resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			option_ = options()
			option_.count = True
			option_.filter = filter_
			option_.args = nitro_util.object_to_string_withoutquotes(obj)
			response = obj.getfiltered(client, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	# Enumerations of the string constants the appliance reports for the
	# "state" and "type" attributes above.
	class State:
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"

	class Type:
		PING = "PING"
		TCP = "TCP"
		HTTP = "HTTP"
		TCP_ECV = "TCP-ECV"
		HTTP_ECV = "HTTP-ECV"
		UDP_ECV = "UDP-ECV"
		DNS = "DNS"
		FTP = "FTP"
		LDNS_PING = "LDNS-PING"
		LDNS_TCP = "LDNS-TCP"
		LDNS_DNS = "LDNS-DNS"
		RADIUS = "RADIUS"
		USER = "USER"
		HTTP_INLINE = "HTTP-INLINE"
		SIP_UDP = "SIP-UDP"
		LOAD = "LOAD"
		FTP_EXTENDED = "FTP-EXTENDED"
		SMTP = "SMTP"
		SNMP = "SNMP"
		NNTP = "NNTP"
		MYSQL = "MYSQL"
		MYSQL_ECV = "MYSQL-ECV"
		MSSQL_ECV = "MSSQL-ECV"
		ORACLE_ECV = "ORACLE-ECV"
		LDAP = "LDAP"
		POP3 = "POP3"
		CITRIX_XML_SERVICE = "CITRIX-XML-SERVICE"
		CITRIX_WEB_INTERFACE = "CITRIX-WEB-INTERFACE"
		DNS_TCP = "DNS-TCP"
		RTSP = "RTSP"
		ARP = "ARP"
		CITRIX_AG = "CITRIX-AG"
		CITRIX_AAC_LOGINPAGE = "CITRIX-AAC-LOGINPAGE"
		CITRIX_AAC_LAS = "CITRIX-AAC-LAS"
		CITRIX_XD_DDC = "CITRIX-XD-DDC"
		ND6 = "ND6"
		CITRIX_WI_EXTENDED = "CITRIX-WI-EXTENDED"
		DIAMETER = "DIAMETER"
		RADIUS_ACCOUNTING = "RADIUS_ACCOUNTING"
		STOREFRONT = "STOREFRONT"
		APPC = "APPC"
		CITRIX_XNC_ECV = "CITRIX-XNC-ECV"
		CITRIX_XDM = "CITRIX-XDM"
class lbmonbindings_response(base_response) :
	""" Response envelope for lbmonbindings requests.

	Holds the standard NITRO status fields plus a pre-allocated list of
	*length* lbmonbindings stubs that the payload formatter fills in.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Fix: the original assigned self.lbmonbindings = [] and then
		# immediately overwrote it; allocate the stub list once.
		self.lbmonbindings = [lbmonbindings() for _ in range(length)]
|
SickGear/SickGear | lib/hachoir_py3/parser/image/jpeg.py | Python | gpl-3.0 | 26,067 | 0.001113 | """
JPEG picture parser.
Information:
- APP14 documents
http://partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf
http://java.sun.com/j2se/1.5.0/docs/api/javax/imageio/metadata/doc-files/jpeg_metadata.html#color
- APP12:
http://search.cpan.org/~exiftool/Image-ExifTool/lib/Image/ExifTool/TagNames.pod
- JPEG Data Format
http://www.w3.org/Graphics/JPEG/itu-t81.pdf
Author: Victor Stinner, Robert Xiao
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet, ParserError, FieldError,
UInt8, UInt16, Enum, Field,
Bit, Bits, NullBits, NullBytes, PaddingBits,
String, RawBytes)
from hachoir_py3.parser.image.common import PaletteRGB
from hachoir_py3.core.endian import BIG_ENDIAN
from hachoir_py3.core.text_handler import textHandler, hexadecimal
from hachoir_py3.parser.image.exif import Exif
from hachoir_py3.parser.image.photoshop_metadata import PhotoshopMetadata
from hachoir_py3.parser.archive.zlib import build_tree
from hachoir_py3.core.tools import paddingSize, alignValue
# Refuse to validate absurdly large files (bytes).
MAX_FILESIZE = 100 * 1024 * 1024

# The four tables (hash/sum for color/grayscale JPEG) comes
# from ImageMagick project.  Each table maps to 101 entries (quality 0-100);
# two lines below were garbled by stray " | " separators and are restored.
QUALITY_HASH_COLOR = (
    1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
    632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
    555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
    397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
    299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
    243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
    192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
    143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
    94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
    45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
    0)

QUALITY_SUM_COLOR = (
    32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104, 27670,
    27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946, 23572, 22846,
    21801, 20842, 19949, 19121, 18386, 17651, 16998, 16349, 15800, 15247,
    14783, 14321, 13859, 13535, 13081, 12702, 12423, 12056, 11779, 11513,
    11135, 10955, 10676, 10392, 10208, 9928, 9747, 9564, 9369, 9193,
    9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347,
    7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495,
    5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698,
    3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
    1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128,
    0)

QUALITY_HASH_GRAY = (
    510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
    300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
    279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
    211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
    158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
    129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
    102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
    76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
    50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
    24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
    0)

QUALITY_SUM_GRAY = (
    16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859, 12560,
    12240, 11861, 11456, 11081, 10714, 10360, 10027, 9679, 9368, 9056,
    8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125,
    5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616,
    4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688,
    3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952,
    2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211,
    2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477,
    1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
    667, 592, 518, 441, 369, 292, 221, 151, 86, 64,
    0)

# Zig-zag scan order of the 64 DCT coefficients (a permutation of 0..63).
JPEG_NATURAL_ORDER = (
    0, 1, 8, 16, 9, 2, 3, 10,
    17, 24, 32, 25, 18, 11, 4, 5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13, 6, 7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63)
class JpegChunkApp0(FieldSet):
    """JFIF APP0 marker segment: version, density/aspect, optional thumbnail."""

    # Meaning of the "units" byte; code 0 means X/Y describe an aspect ratio
    # rather than a physical density.
    UNIT_NAME = {
        0: "pixels",
        1: "dots per inch",
        2: "dots per cm",
    }

    def createFields(self):
        # Fields are yielded lazily, in on-disk order (hachoir consumes the
        # generator one field at a time).
        yield String(self, "jfif", 5, "JFIF string", charset="ASCII")
        if self["jfif"].value != "JFIF\0":
            raise ParserError(
                "Stream doesn't look like JPEG chunk (wrong JFIF signature)")
        yield UInt8(self, "ver_maj", "Major version")
        yield UInt8(self, "ver_min", "Minor version")
        yield Enum(UInt8(self, "units", "Units"), self.UNIT_NAME)
        if self["units"].value == 0:
            yield UInt16(self, "aspect_x", "Aspect ratio (X)")
            yield UInt16(self, "aspect_y", "Aspect ratio (Y)")
        else:
            yield UInt16(self, "x_density", "X density")
            yield UInt16(self, "y_density", "Y density")
        yield UInt8(self, "thumb_w", "Thumbnail width")
        yield UInt8(self, "thumb_h", "Thumbnail height")
        # A zero-area thumbnail means the palette and pixel data are absent.
        thumb_size = self["thumb_w"].value * self["thumb_h"].value
        if thumb_size != 0:
            yield PaletteRGB(self, "thumb_palette", 256)
            yield RawBytes(self, "thumb_data", thumb_size, "Thumbnail data")
class Ducky(FieldSet):
    """One key/value item inside an APP12 "Ducky" segment."""

    BLOCK_TYPE = {
        0: "end",
        1: "Quality",
        2: "Comment",
        3: "Copyright",
    }

    def createFields(self):
        yield Enum(UInt16(self, "type"), self.BLOCK_TYPE)
        # Type 0 terminates the item list and carries no size/payload.
        if self["type"].value == 0:
            return
        yield UInt16(self, "size")
        size = self["size"].value
        if size:
            yield RawBytes(self, "data", size)
class APP12(FieldSet):
    """
    The JPEG APP12 "Picture Info" segment was used by some older cameras, and
    contains ASCII-based meta information.
    """
    def createFields(self):
        yield String(self, "ducky", 5, '"Ducky" string', charset="ASCII")
        # Ducky items repeat until the end of the segment.
        while not self.eof:
            yield Ducky(self, "item[]")
class SOFComponent(FieldSet):
    """Per-component entry of a SOF marker: sampling factors + quant table."""
    def createFields(self):
        yield UInt8(self, "component_id")
        yield Bits(self, "horiz_sample", 4, "Horizontal sampling factor")
        yield Bits(self, "vert_sample", 4, "Vertical sampling factor")
        yield UInt8(self, "quant_table", "Quantization table destination selector")
class StartOfFrame(FieldSet):
    """SOF marker segment: sample precision, image size, component list."""
    def createFields(self):
        yield UInt8(self, "precision")
        yield UInt16(self, "height")
        yield UInt16(self, "width")
        yield UInt8(self, "nr_components")

        for index in range(self["nr_components"].value):
            yield SOFComponent(self, "component[]")
class Comment(FieldSet):
    """COM marker segment: free-form text comment (NUL padding stripped)."""
    def createFields(self):
        yield String(self, "comment", self.size // 8, strip="\0")
class AdobeChunk(FieldSet):
    """APP14 "Adobe" segment: DCT encoder version, flags and color transform."""

    COLORSPACE_TRANSFORMATION = {
        1: "YCbCr (converted from RGB)",
        2: "YCCK (converted from CMYK)",
    }

    def createFields(self):
        # Without the "Adobe" signature, expose the payload as opaque bytes.
        if self.stream.readBytes(self.absolute_address, 5) != b"Adobe":
            yield RawBytes(self, "raw", self.size // 8, "Raw data")
            return
        yield String(self, "adobe", 5, "\"Adobe\" string", charset="ASCII")
        yield UInt16(self, "version", "DCT encoder version")
        yield Enum(Bit(self, "flag00"),
                   {False: "Chop down or subsampling", True: "Blend"})
        yield NullBits(self, "flags0_reserved", 15)
        yield NullBytes(self, "flags1", 2)
        yield Enum(UInt8(self, "color_transform", "Colorspace transformation code"), self.COLORSPACE_TRANSFORMATION)
class SOSComponent(FieldSet):
    """Per-component entry of a SOS marker: component id + coding tables."""
    def createFields(self):
        comp_id = UInt8(self, "component_id")
        yield comp_id
        # The id must reference one of the components declared in the parent
        # scan header ("../nr_components").
        if not(1 <= comp_id.value <= self["../nr_components"].value):
            raise ParserError("JPEG error: Invalid component-id")
        yield Bits(self, "dc_coding_table", 4, "DC entropy coding table destination selector")
        yield Bits(self, "ac_coding_table", 4, "AC entropy coding table destination selector")
class StartOfScan(FieldSet):
def createFields(self):
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
yield SOSComponent(self, "componen |
Orangestar12/cacobot | main.py | Python | gpl-3.0 | 6,389 | 0.006261 | # per-module import for actioninja
# standard imports
import sys # for tracebaks in on_error.
import json # to load the config file.
import traceback # also used to print tracebacks. I'm a lazy ass.
import asyncio # because we're using the async branch of discord.py.
from random import choice # for choosing game ids
import discord # obvious.
# https://github.com/Rapptz/discord.py/tree/async
import cacobot # imports all plugins in the cacobot folder.
# A sample configs/config.json should be supplied.
with open('configs/config.json') as data:
    # The config supplies the command invoker prefix, game list and error
    # messages used by the handlers below.
    config = json.load(data)

# log in
client = discord.Client(max_messages=100)  # cache at most 100 recent messages
def aan(string):
    """Return the indefinite article ("a" or "an") for *string*."""
    return 'an' if string[0].lower() in 'aeiou' else 'a'
# random game status
async def random_game():
    """Rotate the bot's displayed game status, picking a new one hourly."""
    while True:
        picked = choice(config['games'])
        await client.change_status(game=discord.Game(name=picked))
        await asyncio.sleep(3600)
@client.event
async def on_ready():
    ''' Executed when the bot successfully connects to Discord. '''
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
    # pylint: disable=w1401
    # pylint was freaking out about the ascii bullshit so I had to add that.
    # NOTE(review): the banner's whitespace may have been mangled in transit --
    # verify the art below against the upstream repository.
    print("""
 ____ ____ _ ____ _
 / ___|__ _ ___ ___ | __ ) ___ | |_ | _ \ ___ __ _ __| |_ _
| | / _` |/ __/ _ \| _ \ / _ \| __| | |_) / _ \/ _` |/ _` | | | |
| |__| (_| | (_| (_) | |_) | (_) | |_ | _ < __/ (_| | (_| | |_| |
\____\____|\___\___/|____/ \___/ \__| |_| \_\___/\____|\____|\__ |
|___/
    """)
    await random_game()
@client.event
async def on_message(message):
    '''
    Executed when the bot receives a message.
    [message] is a discord.Message object, representing the sent message.
    '''
    cont = True

    # execute Precommands
    # NOTE(review): only the LAST pre-hook's verdict survives this loop --
    # earlier False values are overwritten; confirm that is intended.
    for func in cacobot.base.pres:
        cont = await cacobot.base.pres[func](message, client)

    if not cont:
        return

    # Only react to messages that start with the invoker prefix, were not
    # sent by the bot itself, and contain more than just the prefix.
    if message.content.startswith(config['invoker']) and \
        message.author.id != client.user.id and \
        len(message.content) > 1:

        command = message.content.split()[0][len(cacobot.base.config['invoker']):].lower()
        # So basically if the message was ".Repeat Butt talker!!!" this
        # would be "repeat"

        if command in cacobot.base.functions:
            # DMs have no permission model; otherwise require send permission.
            if message.channel.is_private or\
            message.channel.permissions_for(message.server.me).send_messages:
                await client.send_typing(message.channel)
                await cacobot.base.functions[command](message, client)
            else:
                print('\n===========\nThe bot cannot send messages to #{} in the server "{}"!\n===========\n\nThis message is only showing up because I *tried* to send a message but it didn\'t go through. This probably means the mod team has tried to disable this bot, but someone is still trying to use it!\n\nHere is the command in question:\n\n{}\n\nThis was sent by {}.\n\nIf this message shows up a lot, the bot might be disabled in that server. You should just make it leave if the mod team isn\'t going to just kick it!'.format(
                    message.channel.name,
                    message.server.name,
                    message.content,
                    message.author.name
                    )
                )  # pylint: disable=c0330

    # execute Postcommands
    for func in cacobot.base.posts:
        await cacobot.base.posts[func](message, client)
@client.event
async def on_error(*args):
    '''
    This event is basically a script-spanning `except` statement.

    Logs the traceback, then -- if the error was triggered by a message --
    reports a human-readable explanation back to the originating channel.
    '''
    # args[0] is the message that was recieved prior to the error. At least,
    # it should be. We check it first in case the cause of the error wasn't a
    # message.
    print('An error has been caught.')
    print(traceback.format_exc())

    if len(args) > 1:
        print(args[0], args[1])
        if isinstance(args[1], discord.Message):
            if args[1].author.id != client.user.id:
                if args[1].channel.is_private:
                    print('This error was caused by a DM with {}.\n'.format(args[1].author))
                else:
                    # Fix: "args[1].channel.name" was garbled by a stray
                    # separator in the previous revision.
                    print(
                        'This error was caused by a message.\nServer: {}. Channel: #{}.\n'.format(
                            args[1].server.name,
                            args[1].channel.name
                        )
                    )

                # Hoisted: the active exception class name is used by every
                # branch below (sys.exc_info() is stable inside the handler).
                exc_name = sys.exc_info()[0].__name__

                if exc_name == 'Forbidden':
                    await client.send_message(
                        args[1].channel,
                        'You told me to do something that requires permissions I currently do not have. Ask an administrator to give me a proper role or something!')
                elif exc_name == 'ClientOSError' or exc_name == 'ClientResponseError' or exc_name == 'HTTPException':
                    await client.send_message(
                        args[1].channel,
                        'Sorry, I am under heavy load right now! This is probably due to a poor internet connection. Please submit your command again later.'
                    )
                else:
                    await client.send_message(
                        args[1].channel,
                        '{}\n{}: You caused {} **{}** with your command.'.format(
                            choice(config['error_messages']),
                            args[1].author.name,
                            aan(exc_name),
                            exc_name)
                    )
# Start the bot: blocking call that logs in with the configured token and
# runs the event loop until the process is stopped.
client.run(config['token'])
# Here's the old manual-loop way of starting the bot.
# def main_task():
# '''
# I'm gonna be honest, I have *no clue* how asyncio works. This is all from
# the example in the docs.
# '''
# yield from client.login(config['email'], config['password'])
# yield from client.connect()
#
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main_task())
# loop.close()
# If you're taking the senic tour of the code, you should check out
# cacobot/__init__.py next.
|
davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosanscuneiform_regular.py | Python | gpl-3.0 | 42,569 | 0.023186 | # -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansCuneiform-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0173) #glyph00371
glyphs.append(0x030A) #glyph00778
glyphs.append(0x030B) #glyph00779
glyphs.append(0x02EB) #glyph00747
glyphs.append(0x024D) #glyph00589
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x02E8) #glyph00744
glyphs.append(0x02E7) #glyph00743
glyphs.append(0x02E6) #glyph00742
glyphs.append(0x01AC) #glyph00428
glyphs.append(0x01AD) #glyph00429
glyphs.append(0x01AA) #glyph00426
glyphs.append(0x01AB) #glyph00427
glyphs.append(0x01A8) #glyph00424
glyphs.append(0x01A9) #glyph00425
glyphs.append(0x01A6) #glyph00422
glyphs.append(0x01A7) #glyph00423
glyphs.append(0x01A4) #glyph00420
glyphs.append(0x01A5) #glyph00421
glyphs.append(0x0302) #glyph00770
glyphs.append(0x0303) #glyph00771
glyphs.append(0x0183) #glyph00387
glyphs.append(0x0182) #glyph00386
glyphs.append(0x0181) #glyph00385
glyphs.append(0x0180) #glyph00384
glyphs.append(0x017F) #glyph00383
glyphs.append(0x017E) #glyph00382
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0042) #glyph00066
glyphs.append(0x0043) #glyph00067
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0041) #glyph00065
glyphs.append(0x003E) #glyph00062
glyphs.append(0x003F) #glyph00063
glyphs.append(0x003C) #glyph00060
glyphs.append(0x003D) #glyph00061
glyphs.append(0x008D) #glyph00141
glyphs.append(0x008C) #glyph00140
glyphs.append(0x008F) #glyph00143
glyphs.append(0x008E) #glyph00142
glyphs.append(0x0091) #glyph00145
glyphs.append(0x0090) #glyph00144
glyphs.append(0x0093) #glyph00147
glyphs.append(0x0092) #glyph00146
glyphs.append(0x0095) #glyph00149
glyphs.append(0x0094) #glyph00148
glyphs.append(0x0277) #glyph00631
glyphs.append(0x0276) #glyph00630
glyphs.append(0x027D) #glyph00637
glyphs.append(0x027C) #glyph00636
glyphs.append(0x027B) #glyph00635
glyphs.append(0x027A) #glyph00634
glyphs.append(0x02DC) #glyph00732
glyphs.append(0x02DD) #glyph00733
glyphs.append(0x02DA) #glyph00730
glyphs.append(0x02DB) #glyph00731
glyphs.append(0x02E0) #glyph00736
glyphs.append(0x02E1) #glyph00737
glyphs.append(0x02DE) #glyph00734
glyphs.append(0x02DF) #glyph00735
glyphs.append(0x02E2) #glyph00738
glyphs.append(0x02E3) #glyph00739
glyphs.append(0x02E9) #glyph00745
glyphs.append(0x02E5) #glyph00741
glyphs.append(0x02E4) #glyph00740
glyphs.append(0x03CB) #glyph00971
glyphs.append(0x0314) #glyph00788
glyphs.append(0x0374) #glyph00884
glyphs.append(0x0375) #glyph00885
glyphs.append(0x0376) #glyph00886
glyphs.append(0x0377) #glyph00887
glyphs.append(0x0370) #glyph00880
glyphs.append(0x0371) #glyph00881
glyphs.append(0x0372) #glyph00882
glyphs.append(0x0373) #glyph00883
glyphs.append(0x039F) #glyph00927
glyphs.append(0x039E) #glyph00926
glyphs.append(0x039D) #glyph00925
glyphs.append(0x039C) #glyph00924
glyphs.append(0x0378) #glyph00888
glyphs.append(0x0379) #glyph00889
glyphs.append(0x0399) #glyph00921
glyphs.append(0x0398) #glyph00920
glyphs.append(0x02ED) #glyph00749
glyphs.append(0x03CA) #glyph00970
glyphs.append(0x02EC) #glyph00748
glyphs.append(0x03D3) #glyph00979
glyphs.append(0x0032) #glyph00050
glyphs.append(0x0239) #glyph00569
glyphs.append(0x0238) #glyph00568
glyphs.append(0x0237) #glyph00567
glyphs.append(0x0236) #glyph00566
glyphs.append(0x0235) #glyph00565
glyphs.append(0x0234) #glyph00564
glyphs.append(0x0233) #glyph00563
glyphs.append(0x0232) #glyph00562
glyphs.append(0x0231) #glyph00561
glyphs.append(0x0230) #glyph00560
glyphs.append(0x00CE) #glyph00206
glyphs.append(0x00CF) #glyph00207
glyphs.append(0x00CC) #glyph00204
glyphs.append(0x00CD) #glyph00205
glyphs.append(0x00CA) #glyph00202
glyphs.append(0x00CB) #glyph00203
glyphs.append(0x00C8) #glyph00200
glyphs.append(0x00C9) #glyph00201
glyphs.append(0x0169) #glyph00361
glyphs.append(0x0168) #glyph00360
glyphs.append(0x016B) #glyph00363
glyphs.append(0x016A) #glyph00362
glyphs.append(0x016D) #glyph00365
glyphs.append(0x016C) #glyph00364
glyphs.append(0x00D0) #glyph00208
glyphs.append(0x00D1) #glyph00209
glyphs.append(0x032D) #glyph00813
| glyphs.append(0x032C) #glyph00812
glyphs.append(0x032B) #glyph00811
glyphs.append(0x032A) #glyph00810
glyphs.append(0x0331) #glyph00817
glyphs.append(0x0330) #glyph00816
glyphs.append(0x032F) #glyph00815
glyphs.append(0x032E) #glyph00814
glyphs.append(0x03B8) #glyph00952
glyphs.append(0x03B9) #glyph00953
glyphs.append(0x0333) #glyph00819
glyphs.append(0x0332) #glyph00818
glyphs.append(0x03B | C) #glyph00956
glyphs.append(0x03BD) #glyph00957
glyphs.append(0x03BA) #glyph00954
glyphs.append(0x021C) #glyph00540
glyphs.append(0x02BF) #glyph00703
glyphs.append(0x037C) #glyph00892
glyphs.append(0x0206) #glyph00518
glyphs.append(0x0207) #glyph00519
glyphs.append(0x0388) #glyph00904
glyphs.append(0x0200) #glyph00512
glyphs.append(0x0201) #glyph00513
glyphs.append(0x01FE) #glyph00510
glyphs.append(0x01FF) #glyph00511
glyphs.append(0x0204) #glyph00516
glyphs.append(0x0205) #glyph00517
glyphs.append(0x0202) #glyph00514
glyphs.append(0x0203) #glyph00515
glyphs.append(0x037E) #glyph00894
glyphs.append(0x006D) #glyph00109
glyphs.append(0x006C) #glyph00108
glyphs.append(0x0069) #glyph00105
glyphs.append(0x0068) #glyph00104
glyphs.append(0x006B) #glyph00107
glyphs.append(0x006A) #glyph00106
glyphs.append(0x0065) #glyph00101
glyphs.append(0x0064) #glyph00100
glyphs.append(0x0067) #glyph00103
glyphs.append(0x0066) #glyph00102
glyphs.append(0x02A5) #glyph00677
glyphs.append(0x02A4) #glyph00676
glyphs.append(0x02A3) #glyph00675
glyphs.append(0x02A2) #glyph00674
glyphs.append(0x02A1) #glyph00673
glyphs.append(0x02A0) #glyph00672
glyphs.append(0x029F) #glyph00671
glyphs.append(0x029E) #glyph00670
glyphs.append(0x0308) #glyph00776
glyphs.append(0x0309) #glyph00777
glyphs.append(0x0306) #glyph00774
glyphs.append(0x0307) #glyph00775
glyphs.append(0x0304) #glyph00772
glyphs.append(0x0305) #glyph00773
glyphs.append(0x02A7) #glyph00679
glyphs.append(0x02A6) #glyph00678
glyphs.append(0x01D7) #glyph00471
glyphs.append(0x01D6) #glyph00470
glyphs.append(0x01D9) #glyph00473
glyphs.append(0x01D8) #glyph00472
glyphs.append(0x01DB) #glyph00475
glyphs.append(0x01DA) #glyph00474
glyphs.append(0x01DD) #glyph00477
glyphs.append(0x01DC) #glyph00476
glyphs.append(0x01DF) #glyph00479
glyphs.append(0x01DE) #glyph00478
glyphs.append(0x03A1) #glyph00929
glyphs.append(0x0355) #glyph00853
glyphs.append(0x001F) #glyph00031
glyphs.append(0x001E) #glyph00030
glyphs.append(0x0021) #glyph00033
glyphs.append(0x00 |
maxamillion/atomic-reactor | tests/plugins/test_tag_from_config.py | Python | bsd-3-clause | 5,924 | 0.000169 | """
Copyright (c) 2016 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from flexmock import flexmock
import pytest
import os.path
from atomic_reactor.build import BuildResult
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PostBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins.post_tag_from_config import TagFromConfigPlugin
from atomic_reactor.util import ImageName, df_parser
from atomic_reactor.constants import INSPECT_CONFIG
from tests.constants import (MOCK_SOURCE, MOCK, IMPORTED_IMAGE_ID)
from tests.fixtures import docker_tasker # noqa
if MOCK:
from tests.docker_mock import mock_docker
# Minimal Dockerfile fed to df_parser by the tests below; the "release"
# label is a variable ($parentrelease) resolved from the parent image's
# labels/environment mocked in test_tag_parse.
DF_CONTENT_LABELS = '''\
FROM fedora
LABEL "name"="name_value"
LABEL "version"="version_value"
LABEL "release"="$parentrelease"
'''
class MockSource(object):
    """Stand-in for a build source rooted at *tmpdir*."""

    def __init__(self, tmpdir):
        root = str(tmpdir)
        self.path = root
        self.dockerfile_path = os.path.join(root, 'Dockerfile')

    def get_build_file_path(self):
        """Return (dockerfile path, source root), mirroring the real source."""
        return self.dockerfile_path, self.path
class X(object):
    # Minimal builder stub: tests attach it via setattr(workflow, 'builder', X)
    # and only read these two class attributes.
    image_id = "xxx"
    base_image = ImageName.parse("fedora")
def mock_additional_tags_file(tmpdir, tags):
    """Write one tag per line to <tmpdir>/additional-tags and return its path."""
    target = os.path.join(tmpdir, 'additional-tags')
    with open(target, 'w') as handle:
        handle.writelines(tag + '\n' for tag in tags)
    return target
def mock_workflow(tmpdir):
    """Build a DockerBuildWorkflow wired to a MockSource rooted at *tmpdir*."""
    if MOCK:
        mock_docker()  # replace the docker client with the test double
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    mock_source = MockSource(tmpdir)
    # Attach the stub builder class and point both builder and workflow at
    # the mocked source.
    setattr(workflow, 'builder', X)
    workflow.builder.source = mock_source
    flexmock(workflow, source=mock_source)

    # Parse the (possibly empty) Dockerfile under tmpdir so the plugin can
    # read labels from builder.df_path.
    df = df_parser(str(tmpdir))
    setattr(workflow.builder, 'df_path', df.dockerfile_path)

    return workflow
@pytest.mark.parametrize(('tags', 'name', 'expected'), [  # noqa
    ([], 'fedora', []),
    (['spam'], 'fedora', ['fedora:spam']),
    (['spam', 'bacon'], 'foo', ['foo:spam', 'foo:bacon']),
    # ignore tags with hyphens
    (['foo-bar', 'baz'], 'name', ['name:baz']),
    # make sure that tags are also valid
    (['illegal@char', '.starts.with.dot'], 'bar', []),
    (['has_under', 'ends.dot.'], 'bar', ['bar:has_under', 'bar:ends.dot.']),
    (None, 'fedora', []),
])
def test_tag_from_config_plugin_generated(tmpdir, docker_tasker, tags, name,
                                          expected):
    """Tags from the additional-tags file are combined with the Name label."""
    workflow = mock_workflow(tmpdir)
    # The plugin reads the image name from the inspected "Name" label.
    workflow.built_image_inspect = {
        INSPECT_CONFIG: {'Labels': {'Name': name}}
    }
    workflow.build_result = BuildResult(image_id=IMPORTED_IMAGE_ID)

    # Simulate missing additional-tags file.
    if tags is not None:
        mock_additional_tags_file(str(tmpdir), tags)

    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key}]
    )

    results = runner.run()
    plugin_result = results[TagFromConfigPlugin.key]
    assert plugin_result == expected
@pytest.mark.parametrize(('inspect', 'error'), [  # noqa
    ({'Labels': {}}, "KeyError('name'"),
    ({}, "KeyError('Labels'"),
    (None, "RuntimeError('There is no inspect data"),
])
def test_bad_inspect_data(tmpdir, docker_tasker, inspect, error):
    """Missing or partial inspect data must surface as PluginFailedException."""
    workflow = mock_workflow(tmpdir)
    # inspect=None simulates no inspect data at all.
    if inspect is not None:
        workflow.built_image_inspect = {
            INSPECT_CONFIG: inspect
        }
    workflow.build_result = BuildResult(image_id=IMPORTED_IMAGE_ID)

    mock_additional_tags_file(str(tmpdir), ['spam', 'bacon'])

    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key}]
    )

    with pytest.raises(PluginFailedException) as exc:
        runner.run()
    assert error in str(exc)
@pytest.mark.parametrize(('unique_tags', 'primary_tags', 'expected'), [  # noqa
    (None, None, ['name_value:get_tags', 'name_value:file_tags']),
    ([], [], []),
    (['foo', 'bar'], [], ['name_value:foo', 'name_value:bar']),
    ([], ['foo', 'bar'], ['name_value:foo', 'name_value:bar']),
    ([], ['foo', '{unknown}', 'bar'], None),
    ([], ['foo', '{version}', 'bar'], ['name_value:foo', 'name_value:version_value',
                                       'name_value:bar']),
    ([], ['foo', '{version}-{release}', 'bar'],
     ['name_value:foo', 'name_value:version_value-7.4.1', 'name_value:bar']),
    (['foo', 'bar'], ['{version}'], ['name_value:foo', 'name_value:bar',
                                     'name_value:version_value']),
    (['foo', 'bar'], ['{version}-{release}'],
     ['name_value:foo', 'name_value:bar', 'name_value:version_value-7.4.1']),
])
def test_tag_parse(tmpdir, docker_tasker, unique_tags, primary_tags, expected):
    """Tag suffix templates expand from Dockerfile labels and parent env.

    expected=None means the plugin must fail (unknown template key).
    Fix: two lines below were garbled by stray separators in the previous
    revision ("i | s not None" and a leading "| " before runner.run()).
    """
    df = df_parser(str(tmpdir))
    df.content = DF_CONTENT_LABELS

    workflow = mock_workflow(tmpdir)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    workflow.build_result = BuildResult.make_remote_image_result()

    # The {release} placeholder resolves against the parent image's data.
    flexmock(workflow, base_image_inspect={
        INSPECT_CONFIG: {
            'Labels': {'parentrelease': '7.4.1'},
            'Env': {'parentrelease': '7.4.1'},
        }
    })

    mock_additional_tags_file(str(tmpdir), ['get_tags', 'file_tags'])

    # Only pass tag_suffixes when both lists are provided; otherwise the
    # plugin falls back to the additional-tags file.
    if unique_tags is not None and primary_tags is not None:
        input_tags = {
            'unique': unique_tags,
            'primary': primary_tags
        }
    else:
        input_tags = None

    runner = PostBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{'name': TagFromConfigPlugin.key,
          'args': {'tag_suffixes': input_tags}}]
    )

    if expected is not None:
        results = runner.run()
        plugin_result = results[TagFromConfigPlugin.key]
        assert plugin_result == expected
    else:
        with pytest.raises(PluginFailedException):
            runner.run()
|
rainslytherin/ansible | lib/ansible/inventory/script.py | Python | gpl-3.0 | 9,853 | 0.008769 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import os
import subprocess
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible import utils
from ansible import errors
import sys
class InventoryScript(object):
'''
Host inventory parser for ansibl | e using external inventory scripts.
当inventory文件为可执行的脚本时的处理逻辑。
'''
    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        """Run the executable inventory script with --list and parse its JSON.

        Raises AnsibleError when the script cannot be executed or exits
        non-zero.
        """
        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)  # absolute path to the script
        cmd = [ self.filename, "--list" ]  # executable inventory scripts must support --list
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)  # run the script
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()  # collect output

        if sp.returncode != 0:  # a non-zero exit status is fatal
            raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))

        self.data = stdout  # raw JSON text produced by the script
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)  # build Group/Host objects from self.data
    def _parse(self, err):
        # Parse the JSON emitted by the script's --list call into a dict of
        # group name -> Group object. `err` is the script's stderr, echoed
        # back to the user when the JSON cannot be parsed.
        all_hosts = {}
        # not passing from_remote because data from CMDB is trusted
        self.raw = utils.parse_json(self.data)  # self.data (the --list stdout) must be JSON text
        self.raw = json_dict_bytes_to_unicode(self.raw)  # normalise every key/value to unicode
        all = Group('all')  # the implicit top-level "all" group
        groups = dict(all=all)  # registry: group name -> Group
        group = None
        if 'failed' in self.raw:  # script signalled a failure; surface its stderr and bail out
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
        for (group_name, data) in self.raw.items():
            # Scripts for Ansible >= 1.3 may deliver per-host variables under
            # "_meta"/"hostvars", avoiding one --host call per host; scripts
            # for 1.2 and earlier are still queried host by host.
            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host. This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            """
            {
                "databases" : {
                    "hosts" : [ "host1.example.com", "host2.example.com" ],
                    "vars" : {
                        "a" : true
                    }
                },
                "webservers" : [ "host2.example.com", "host3.example.com" ],
                "atlanta" : {
                    "hosts" : [ "host1.example.com", "host4.example.com", "host5.example.com" ],
                    "vars" : {
                        "b" : false
                    },
                    "children": [ "marietta", "5points" ]
                },
                "marietta" : [ "host6.example.com" ],
                "5points" : [ "host7.example.com" ]
            }
            {
                # results of inventory script as above go here
                # ...
                "_meta" : {
                    "hostvars" : {
                        "moocow.example.com" : { "asdf" : 1234 },
                        "llama.example.com" : { "asdf" : 5678 },
                    }
                }
            {
                "moocow.example.com" : { "asdf" : 1234 },
                "llama.example.com" : { "asdf" : 5678 }
            }
            """
            if group_name == '_meta':  # "_meta" carries metadata, not a real group
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']  # cache host vars delivered via _meta
                continue  # skip the normal group handling below
            if group_name != all.name:  # every name other than "all" gets its own Group
                group = groups[group_name] = Group(group_name)  # a bare hostname entry yields a group of the same name
            else:
                group = all
            host = None
            if not isinstance(data, dict):  # a non-dict value is just a plain host list
                data = {'hosts': data}
            # is not those subkeys, then simplified syntax, host with vars
            elif not any(k in data for k in ('hosts','vars','children')):
                # A dict without hosts/vars/children is the simplified syntax:
                # group_name is actually a hostname and data its host variables.
                data = {'hosts': [group_name], 'vars': data}
            # The two branches above normalise data to either
            # {'hosts': [host1, host2, ...]} or {'hosts': [group_name], 'vars': data}.
            if 'hosts' in data:
                if not isinstance(data['hosts'], list):  # host lists must really be lists
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))
                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)  # create each Host object only once (de-duplicated)
                    host = all_hosts[hostname]
                    group.add_host(host)  # attach the host to the current group
            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))
                for k, v in data['vars'].iteritems():
                    if group.name == all.name:  # variables for "all" go on the all group itself
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)
        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:  # wire child groups to their parents
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])
        for group in groups.values():
            if group.depth == 0 and group.name != 'all':  # top-level groups become children of "all"
                all.add_child_group(group)
        return groups
def get_host_variables(self, host):
""" Runs <script> --host <hostname> to determine additional host variables """
# <script> --host <hostname> 方式获取单个hostname的主机变量
if self.host_vars_from_top is not None:
got = self.host_vars_from_top.get(host.name, {}) # 如果存在缓存中,则直接返回
return got
cmd = [self.filename, "--host", host.name]
try:
sp = subprocess.Popen(cmd, stdo |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/pycoverage/setup.py | Python | mit | 6,151 | 0.007804 | # setup.py for coverage.py
"""Code coverage measurement for Python
Coverage.py measures code coverage, typically during test execution. It uses
the code analysis tools and tracing hooks provided in the Python standard
library to determine which lines are executable, and which have been executed.
Coverage.py runs on Pythons 2.3 through 3.3, and PyPy 1.9.
Documentation is at `nedbatchelder.com <%s>`_. Code repository and issue
tracker are on `Bitbucket <http://bitbucket.org/ned/coveragepy>`_, with a
mirrored repo on `Github <https://github.com/nedbat/coveragepy>`_.
New in 3.7: ``--debug``, and 12 bugs closed.
New in 3.6: ``--fail-under``, and >20 bugs closed.
New in 3.5: Branch coverage exclusions, keyboard shortcuts in HTML report.
New in 3.4: Better control over source to measure, and unexecuted files
can be reported.
New in 3.3: .coveragerc files.
New in 3.2: Branch coverage!
"""
# This file is used unchanged under all versions of Python, 2.x and 3.x.
classifiers = """\
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
"""
# Pull in the tools we need.
import os, sys
from setuptools import setup
from distutils.core import Extension # pylint: disable=E0611,F0401
from distutils.command.build_ext import build_ext # pylint: disable=E0611,F0401,C0301
from distutils import errors # pylint: disable=E0611,F0401
# Get or massage our metadata. We exec coverage/version.py so we can avoid
# importing the product code into setup.py.
doc = __doc__ # __doc__ will be overwritten by version.py.
__version__ = __url__ = "" # Keep pylint happy.
cov_ver_py = os.path.join(os.path.split(__file__)[0], "coverage/version.py")
version_file = open(cov_ver_py)
try:
    # Executing version.py defines __version__ and __url__ in this namespace.
    exec(compile(version_file.read(), cov_ver_py, 'exec'))
finally:
    version_file.close()
# The module docstring doubles as the long description; its %s placeholder
# is filled with the documentation URL that version.py just defined.
doclines = (doc % __url__).splitlines()
classifier_list = classifiers.splitlines()
# Derive the Trove "Development Status" classifier from the version string:
# an 'a' marks an alpha, a 'b' a beta, anything else a stable release.
if 'a' in __version__:
    devstat = "3 - Alpha"
elif 'b' in __version__:
    devstat = "4 - Beta"
else:
    devstat = "5 - Production/Stable"
classifier_list.append("Development Status :: " + devstat)
# Install a script as "coverage", and as "coverage[23]", and as
# "coverage-2.7" (or whatever).
scripts = [
    'coverage = coverage:main',
    'coverage%d = coverage:main' % sys.version_info[:1],
    'coverage-%d.%d = coverage:main' % sys.version_info[:2],
]
# Create the keyword arguments for setup().  Kept in a dict so main() can
# retry the installation without the C extension if compiling it fails.
setup_args = dict(
    name = 'coverage',
    version = __version__,
    packages = [
        'coverage',
    ],
    package_data = {
        'coverage': [
            'htmlfiles/*.*',
        ]
    },
    entry_points = {'console_scripts': scripts},
    # We need to get HTML assets from our htmlfiles dir.
    zip_safe = False,
    author = 'Ned Batchelder and others',
    author_email = 'ned@nedbatchelder.com',
    # First docstring line is the short description; the rest (after the
    # blank line) becomes the long description shown on PyPI.
    description = doclines[0],
    long_description = '\n'.join(doclines[2:]),
    keywords = 'code coverage testing',
    license = 'BSD',
    classifiers = classifier_list,
    url = __url__,
)
# A replacement for the build_ext command which raises a single exception
# if the build fails, so we can fallback nicely.
# These are the distutils exceptions that mean "the C extension could not
# be compiled on this machine" rather than a bug in setup.py itself.
ext_errors = (
    errors.CCompilerError,
    errors.DistutilsExecError,
    errors.DistutilsPlatformError,
)
if sys.platform == 'win32' and sys.version_info > (2, 6):
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)
class BuildFailed(Exception):
    """Signals that the C extension could not be compiled.

    Must be raised from inside an ``except`` block: the exception
    currently being handled is captured as ``self.cause`` so callers
    can report the underlying compiler error.
    """

    def __init__(self):
        super(BuildFailed, self).__init__()
        # sys.exc_info() is spelled identically on py2 and py3, unlike the
        # `except ... as e` binding rules of the two languages.
        self.cause = sys.exc_info()[1]
class ve_build_ext(build_ext):
    """Build C extensions, but fail with a straightforward exception."""
    def run(self):
        """Wrap `run` with `BuildFailed`."""
        try:
            build_ext.run(self)
        except errors.DistutilsPlatformError:
            # Translate the distutils failure into BuildFailed so the caller
            # can retry the installation without the C extension.
            raise BuildFailed()
    def build_extension(self, ext):
        """Wrap `build_extension` with `BuildFailed`."""
        try:
            # Uncomment to test compile failures:
            # raise errors.CCompilerError("OOPS")
            build_ext.build_extension(self, ext)
        except ext_errors:
            # Any of the known "compiler unavailable/broken" errors.
            raise BuildFailed()
        except ValueError:
            # this can happen on Windows 64 bit, see Python issue 7511
            if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3
                raise BuildFailed()
            raise
# There are a few reasons we might not be able to compile the C extension.
# Figure out if we should attempt the C extension or not.
compile_extension = True
if sys.platform.startswith('java'):
    # Jython can't compile C extensions
    compile_extension = False
if '__pypy__' in sys.builtin_module_names:
    # Pypy can't compile C extensions
    compile_extension = False
if compile_extension:
    # Register the C tracer extension and the build_ext subclass that turns
    # compiler failures into BuildFailed (handled by main() below).
    setup_args.update(dict(
        ext_modules = [
            Extension("coverage.tracer", sources=["coverage/tracer.c"])
        ],
        cmdclass = {
            'build_ext': ve_build_ext,
        },
    ))
# Py3.x-specific details.
if sys.version_info >= (3, 0):
    # The source already runs unchanged on Python 3; 2to3 must not touch it.
    setup_args.update(dict(
        use_2to3 = False,
    ))
def main():
    """Actually invoke setup() with the arguments we built above."""
    # For a variety of reasons, it might not be possible to install the C
    # extension. Try it with, and if it fails, try it without.
    try:
        setup(**setup_args)
    except BuildFailed:
        msg = "Couldn't install with extension module, trying without it..."
        # Report the original compiler error captured in BuildFailed.cause.
        exc = sys.exc_info()[1]
        exc_msg = "%s: %s" % (exc.__class__.__name__, exc.cause)
        print("**\n** %s\n** %s\n**" % (msg, exc_msg))
        # Retry as a pure-Python install: drop the extension and run again.
        del setup_args['ext_modules']
        setup(**setup_args)
if __name__ == '__main__':
    main()
|
ram8647/gcb-mobilecsp | modules/help_urls/topics.py | Python | apache-2.0 | 6,586 | 0.000607 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Help URL topic mappings."""
__author__ = [
'John Cox (johncox@google.com)',
]
class _LegacyUrl(object):
"""A legacy URL, where the value is taken verbatim instead of calculated."""
def __init__(self, value):
self.value = value
# Mappings. Each row is a (topic_id, url_suffix), where topic_id is a string
# containing the unique identifier of the help topic, and url_suffix is a string
# giving the suffix of the help URL. Neither may be missing or empty. url_suffix
# is relative to the version component of the URL. If you have a URL of the form
#
# https://www.google.com/edu/openonline/course-builder/docs/1.10/something,
#
# the value to put here is '/something'.
# Rows wrapped in _LegacyUrl are complete URLs used verbatim instead.
_ALL = [
    ('certificate:certificate_criteria',
     '/prepare-for-students/certificates.html'),
    ('core_tags:google_drive:unavailable',
     '/create-a-course/add-content/google-drive.html'
     '#enable-apis'),
    ('core_tags:google_group:name',
     '/create-a-course/add-content/content-editor.html'
     '#google-group'),
    ('core_tags:markdown:markdown',
     '/create-a-course/add-content/content-editor.html'
     '#markdown'),
    ('course:advanced:description',
     '/create-a-course/course-settings.html'
     '#advanced-course-settings'),
    ('course:assessment:content',
     '/create-a-course/add-elements/assessments/assessments.html'),
    ('course:assessment:html_content',
     '/create-a-course/add-elements/assessments/assessments.html'),
    ('course:assessment:review_form',
     '/create-a-course/add-elements/assessments/peer-review.html'
     '#reviewer-feedback-form'),
    ('course:assessment:review_opts',
     '/create-a-course/add-elements/assessments/peer-review.html'
     '#peer-review-fields'),
    ('course:assessment:snippet',
     '/create-a-course/add-elements/assessments/assessments.html'
     '#embed-link'),
    ('course:assessment:workflow:grader',
     '/create-a-course/add-elements/assessments/assessments.html'
     '#grading-method'),
    ('course:auto_index',
     '/create-a-course/course-settings.html'
     '#auto-index-course'),
    ('course:availability:availability',
     '/publish-a-course/availability.html'
     '#content-availability'),
    ('course:availability:shown_when_unavailable',
     '/publish-a-course/availability.html'
     '#content-availability'),
    ('course:can_record_student_events',
     '/create-a-course/course-settings.html'
     '#enable-student-analytics'),
    ('course:can_student_change_locale',
     '/prepare-for-students/translations/set-up.html'
     '#show-language-picker'),
    ('course:google:api_key',
     '/create-a-course/add-content/google-drive.html'
     '#get-credentials'),
    ('course:google:client_id',
     '/create-a-course/add-content/google-drive.html'
     '#get-credentials'),
    ('course:google_analytics_id',
     '/create-a-course/course-settings.html'
     '#analytics-id'),
    ('course:google_tag_manager_id',
     '/create-a-course/course-settings.html'
     '#tag-manager-id'),
    ('course:lesson:activity',
     '/create-a-course/add-elements/lessons/add-new.html'
     '#lesson-body'),
    ('course:lesson:manual_progress',
     '/create-a-course/add-elements/lessons/settings.html'
     '#allow-progress-override'),
    ('course:main_image:url',
     '/create-a-course/course-settings.html'
     '#image-or-video'),
    ('course:unit:manual_progress',
     '/create-a-course/add-elements/units/details-and-settings.html'
     '#allow-progress-override'),
    ('course:welcome_notifications_sender',
     '/prepare-for-students/registration.html'
     '#email-sender'),
    ('dashboard:gift_questions:questions',
     '/create-a-course/add-elements/questions/formats.html'
     '#gift-questions'),
    ('dashboard:powered_by',
     '/index.html'),
    ('data_pump:json_key',
     '/analyze-data/custom-analytics.html'
     '#data-pump-values'),
    ('data_pump:pii_encryption_token',
     '/analyze-data/custom-analytics.html'
     '#data-pump-values'),
    ('data_pump:project_id',
     '/set-up-course-builder/create-a-cloud-project.html'
     '#set-project-id'),
    ('data_pump:table_lifetime',
     '/analyze-data/custom-analytics.html'
     '#data-pump-values'),
    ('data_removal:removal_policy',
     '/create-a-course/course-settings.html'
     '#removal-policy'),
    ('help:documentation',
     '/index.html'),
    ('help:forum', _LegacyUrl(
        'https://groups.google.com/forum/?fromgroups#!categories/'
        'course-builder-forum/general-troubleshooting')),
    ('help:videos', _LegacyUrl(
        'https://www.youtube.com/playlist?list=PLFB_aGY5EfxeltJfJZwkjqDLAW'
        'dMfSpES')),
    ('labels:tracks',
     '/create-a-course/organize-elements/tracks.html'),
    ('math:math:input_type',
     '/create-a-course/add-content/content-editor.html'
     '#math-formula'),
    ('modules:guide:availability',
     '/administer-site/guides.html'
     '#availability'),
    ('modules:guide:enabled',
     '/administer-site/guides.html'
     '#enable-guides'),
    ('modules:webserv:availability',
     '/administer-site/web-server.html'
     '#availability'),
    ('modules:webserv:doc_root',
     '/administer-site/web-server.html'
     '#content-root'),
    ('modules:webserv:enabled',
     '/administer-site/web-server.html'
     '#enable-web-server'),
    ('questionnaire:questionnaire:disabled',
     '/create-a-course/add-content/content-editor.html'
     '#questionnaire'),
    ('questionnaire:questionnaire:form_id',
     '/create-a-course/add-content/content-editor.html'
     '#questionnaire'),
    ('reg_form:additional_registration_fields',
     '/prepare-for-students/registration.html'
     '#registration-questions'),
    ('settings:debugging:show_hooks',
     '/debug-course/debug-course.html'),
    ('settings:debugging:show_jinja_context',
     '/debug-course/debug-course.html'),
    ('workflow:review_window_mins',
     '/create-a-course/add-elements/assessments/peer-review.html'
     '#review-window-timeout'),
]
|
shaunduncan/helga-oneliner | helga_oneliner.py | Python | mit | 8,137 | 0.002116 | # -*- coding: utf8 -*-
import random
import re
from helga.plugins import match
def imgur(image):
    """Build the direct i.imgur.com gif URL for the given image hash."""
    return 'http://i.imgur.com/' + image + '.gif'
# Map of regex pattern -> canned response.  A response may be a single
# string, a tuple of alternatives (one is picked at random), or a tuple of
# strings meant to be sent as consecutive lines (the nested tuples below).
RESPONSES = {
    # Direct text responses
    r'(gross|disgusting|eww)': (imgur('XEEI0Rn'),),  # Dumb and Dumber Gag
    r'(\sGFY\s|GTFO|Fuck (You|Off))': (imgur('VPqgYjF'),  # Ryan Stiles pulling middle finger from pocket
                                       imgur('rWhZY3k'),),  # half baked
    r'womp womp': ("http://www.sadtrombone.com/?play=true",
                   "http://www.youtube.com/watch?v=_-GaXa8tSBE"),
    r'^:w?q$': ("this ain't your vi",),
    r'^(pwd$|(sudo|ls|cd|rm)(\s\w+|$))': "this ain't your shell",
    r'php': ("php is just terrible",
             "php's motto: MERGE ALL THE PULL REQUESTS"),
    r'^select( .* )from(.*)': "'; DROP TABLES;",
    r'mongo(db)?\s': 'http://youtu.be/b2F-DItXtZs',  # MongoDB is webscale
    r'gem install': "ruby. not even once.",
    r'\\m/': 'rock on',
    r'((beetle|betel)(geuse|juice)\s?){3}': "i'm the ghost with the most",
    # lol, gifs
    r'(bravo|well done)': (imgur('wSvsV'),  # Citizen Kane slow clap
                           imgur('HUKCsCv'),  # Colbert & Stewart bravo
                           imgur('FwqHZ6Z')),  # Gamer conceding defeat
    r'is \w+ down\?': imgur('yX5o8rZ'),  # THE F5 HAMMER
    r"(i don't care|do i look like i care|zero fucks)": (
        imgur('oKydfNm'),  # Bird bouncing on hawk's head
        imgur('KowlC'),  # Gangam style 'do i look like i care'
        imgur('xYOqXJv'),  # Dog hitting cat with tail
        imgur('1b2YNU3'),  # But wait! bubble
    ),
    r'^nope$': (imgur('iSm1aZu'),  # Arrested development NOPE
                imgur('2xwe756'),  # Lonley island like a boss NOPE
                imgur('zCtbl'),  # Tracy Morgan NOPE
                imgur('foEHo'),  # Spongebob buried in sand
                imgur('xKYs9'),  # Puppy does not like lime
                imgur('ST9lw3U'),  # Seinfeld I'm Out
                imgur('c4gTe5p'),  # Cat thriller walk I'm out
                'http://i.minus.com/iUgVCKwjISSke.gif',  # The Nope Bader
                ),
    r'tl;?dr': (imgur('dnMjc'),  # Lightsaber did not read
                imgur('V2H9y')),  # Craig Robinson did not read
    r'panic': (imgur('tpGQV'),  # Aladding start panicking
               imgur('WS4S2'),  # Colbert screaming in terror
               imgur('rhNOy3I'),  # Panic cat bug eyes
               imgur('SNvM6CZ'),  # Girl leans on escalator handrail
               imgur('H7PXV'),  # Ain't nobody got time for that
               imgur('fH9e2')),  # Out of control truck on collision course
    r'shock(ed|ing)?': (imgur('zVyOBlR'),  # Cartoon is shocked
                        imgur('Q4bI5'),  # Shocked cat is shocked
                        imgur('wdA2Z'),  # Monsters Inc watching Boo in compactor
                        imgur('nj3yp'),  # Spock is shocked
                        imgur('AGnOQ'),  # PeeWee is shocked
                        imgur('wkY1FUI'),  # Shocked looks around
                        imgur('AXuUYIj')),  # Simpsons jaw drop
    r'(bloody mary|vodka)': imgur('W9SS4iJ'),  # Archer: Bloody Mary, blessed are you among cocktails
    r'popcorn': (imgur('00IJgSZ'),  # Thriller popcorn
                 imgur('5px9l')),  # Colbert popcorn
    r'deal with it': (imgur('12WoH'),  # Slip n slide DWI
                      imgur('6E6n1'),  # WTF Oprah
                      imgur('hYdy4'),  # Baseball catch deal with it
                      imgur('pqmfX'),  # WTF pouring water from nose
                      imgur('9WbAL'),  # A three toed sloth in a chair
                      imgur('KdldmZk'),  # Polar bear jumping out of water
                      imgur('49UtI5N'),  # The Fresh Prince of DEAL WITH IT
                      imgur('1pkNeOy'),  # Skyler
                      imgur('KzEXQDq'),  # Tom & Jerry
                      imgur('1kxk9z6'),  # deal with it dance
                      u'(⌐■_■)',
                      # Multiline
                      (u'( •_•)',
                       u'( •_•)>⌐■-■',
                       u'(⌐■_■)',
                       'deal with it'),
                      (u'. B :-|',
                       u'. B :-|',
                       u'. B :-|',
                       u'. B-| deal with it')),
    r'(mind blown|blew my mind)': (imgur('U6kCXUp'),  # Head asploding
                                   imgur('1HMveGj')),  # Tim and Eric mind blown
    r'(sweet jesus|mother of god)': (imgur('5vXdAOV'),  # Captain Kirk
                                     imgur('g155Wra'),  # Star Trek freaking out
                                     imgur('dyeHb'),  # BJ Novak looks confused
                                     imgur('VkHiG6D'),  # Face twitching
                                     imgur('aiH4Mts'),  # Christopher Lloyd realizes something
                                     imgur('nOJme'),  # Cookie monster sweet jesus
                                     imgur('KtdHWhs'),  # Fight club realization
                                     imgur('z5hhSsU'),  # Cat with toy: OMG it was you!
                                     imgur('zuc9tAm')),  # Dinosaurs show - drops beer
    r'nailed it': (imgur('KsQzQTF'),  # Cat not trying to catch rat
                   imgur('5nrEk'),  # Olympic diving fail
                   imgur('n9zw0'),  # squirrel spinning on bird feeder
                   imgur('puZy04m'),  # Kid jumping into pool fail
                   imgur('MBdxv'),  # Girl trying to jump bike ramp fail
                   imgur('6XRqt'),  # FIXME
                   imgur('dFuBE'),  # Cat jumps into a box
                   imgur('vUACp'),  # Backflip off bleachers
                   imgur('59h9A8e')),  # Backflip off tree
    r'unacceptable': imgur('BwdP2xl'),  # 3D rendering goes wrong
    r'^(iknorite|right)\?$': imgur('RvquHs0'),  # Breaking Bad: You're god damn right
    r'fuck yea': (imgur('GZ5CD5r'),  # Data shooting dice
                  imgur('nEmrMkq')),  # Top Gun ... DANGER ZONE
    r'\w+ broke prod': (imgur('SuCGnum'),  # Anchorman: You ate the whole wheel of cheese?
                        imgur('sbQUDbF'),),  # fail boat
    r'^indeed$': (imgur('bQcbpki'),  # Leonardo DiCaprio in Django Unchained
                  imgur('CRIcP'),),  # Teal'c from Stargate SG-1
    r'f(f{6}|7)u(u{11}|12)': 'http://i.minus.com/ibnfJRQi1h4z30.gif',  # Workaholics: FUUUUUUUUUUUUUU
    r'wtf': imgur('bpW6Xkd'),  # WTF supercut
    # Various modern unicode emoticons
    r'(why|y) (u|you) no': u'ლ(ಠ益ಠლ)',
    r'i (don\'?t know|dunno),? lol': u'¯\(°_o)/¯',
    r'look.?of.?disapproval(\.jpg|\.gif)?': u'ಠ_ಠ',
    r'i (am disappoint|disapprove)': u'ಠ_ಠ',
    r'^not sure if \w+': u'≖_≖',
    r'(tableflip|flip (a|the|some) tables?)': (u'(╯°□°)╯︵ ┻━┻',
                                               u'(ノಠ益ಠ)ノ彡┻━┻'),
    r'(gonna|going to) (make \w+ )?cry': u'(ಥ﹏ಥ)',
    r'(bro ?fist|fist ?bump)': u'( _)=mm=(^_^ )',
    r'hi(gh)?[ -]?five': ('\o',
                          u'( ‘-’)人(゚_゚ )'),
    r'(^|[^\\])o/$': '\o',
    r'^\\o$': 'o/'
}
def find_response(message):
    """Return the canned response for the first RESPONSES pattern that
    occurs in `message`, or None when nothing matches.

    When the configured response is a tuple/list of alternatives, one of
    them is picked at random.
    """
    for pat, resp in RESPONSES.iteritems():
        # re.search is enough here: we only need to know whether the
        # pattern occurs, not collect every match like re.findall does.
        if re.search(pat, message, re.I):
            if isinstance(resp, (tuple, list)):
                return random.choice(resp)
            return resp
    return None
@match(find_response, priority=0)
def oneliner(client, channel, nick, message, match):
    """
    Reply with the canned response that find_response() already selected
    (helga passes it in as `match`).
    Maybe some of these will become their own thing, but for
    now, they live here.
    DEAL WITH IT
    """
    return match # pragma: no cover
|
SydneyUniLibrary/auto-holds | sierra/migrations/0001_initial.py | Python | gpl-3.0 | 1,534 | 0.000652 | # Copyright 2016 Susan Bennett, David Mitchell, Jim Nicholls
#
# This file is part of AutoHolds.
#
# AutoHolds is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AutoHolds is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AutoHolds. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-16 15:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the app: creates the model definitions from scratch.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MaterialPropertyMyuser',
            fields=[
                ('code', models.TextField(max_length=3, primary_key=True, serialize=False)),
                ('display_order', models.IntegerField(unique=True)),
                ('is_public', models.BooleanField()),
                ('name', models.TextField(max_length=255)),
            ],
            options={
                # managed=False: Django never creates or alters this table;
                # the model maps onto the existing Sierra database view below.
                'managed': False,
                'db_table': 'sierra_view.material_property_myuser',
            },
        ),
    ]
|
Ultimaker/Cura | plugins/VersionUpgrade/VersionUpgrade40to41/__init__.py | Python | lgpl-3.0 | 2,370 | 0.004641 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade40to41
if TYPE_CHECKING:
from UM.Application import Application
upgrade = VersionUpgrade40to41.VersionUpgrade40to41()
def getMetaData() -> Dict[str, Any]:
    """Describe the 4.0 -> 4.1 upgrade routes and where each file type lives.

    "version_upgrade" maps (format, setting-version) pairs to the target
    version plus the upgrade function; "sources" tells the upgrader how to
    read the version of, and locate, each file type.
    """
    return {
        "version_upgrade": {
            # From                           To                              Upgrade function
            ("preferences", 6000006):        ("preferences", 6000007,        upgrade.upgradePreferences),
            ("machine_stack", 4000006):      ("machine_stack", 4000007,      upgrade.upgradeStack),
            ("extruder_train", 4000006):     ("extruder_train", 4000007,     upgrade.upgradeStack),
            ("definition_changes", 4000006): ("definition_changes", 4000007, upgrade.upgradeInstanceContainer),
            ("quality_changes", 4000006):    ("quality_changes", 4000007,    upgrade.upgradeInstanceContainer),
            ("quality", 4000006):            ("quality", 4000007,            upgrade.upgradeInstanceContainer),
            ("user", 4000006):               ("user", 4000007,               upgrade.upgradeInstanceContainer),
        },
        "sources": {
            "preferences": {
                "get_version": upgrade.getCfgVersion,
                "location": {"."}
            },
            "machine_stack": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./machine_instances"}
            },
            "extruder_train": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./extruders"}
            },
            "definition_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./definition_changes"}
            },
            "quality_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./quality_changes"}
            },
            "quality": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./quality"}
            },
            "user": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./user"}
            }
        }
    }
def register(app: "Application") -> Dict[str, Any]:
    """Register this plugin's upgrade object under the "version_upgrade" hook."""
    return { "version_upgrade": upgrade }
|
theKono/mobile-push | setup.py | Python | apache-2.0 | 1,163 | 0 | #!/usr/bin/env python
# standard library imports
from os.path import exists
# third party related imports
from setuptools import setup, find_packages
# local library imports
from mobile_push import __version__
def read_from_file(filename):
    """Return the full contents of *filename*, or '' when it does not exist."""
    if not exists(filename):
        return ''
    with open(filename) as f:
        return f.read()
setup(
    name='mobile-push',
    version=__version__,

    # Author contact information
    author='Yu Liang',
    author_email='yu.liang@thekono.com',

    # If you had mobile_push.tests, you would also include that in this list
    packages=find_packages(),

    # Any executable scripts, typically in 'bin'. E.g 'bin/do-something.py'
    scripts=[],

    # REQUIRED: Your project's URL
    url='https://github.com/theKono/mobile-push',

    # Put your license here. See LICENSE.txt for more information
    license=read_from_file('LICENSE'),

    # Put a nice one-liner description here
    description='A mobile-push microservice (APNS, GCM)',
    long_description=read_from_file('README.md'),

    # Any requirements here, e.g. "Django >= 1.1.1"
    install_requires=read_from_file('requirements.txt').split('\n'),
)
|
jakobzhao/wbcrawler3 | sentiment_by_tencent.py | Python | mit | 378 | 0 | # !/usr/bin/pytho | n
# -*- coding: utf-8 -*-
#
# Created on Nov 19, 2015
# @author: Bo Zhao
# @email: bo_zhao@hks.harvard.edu
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import sys
from wbcrawler.sentiment import tencent_sentiment
# Python 2 idiom: force utf-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding('utf-8')
# Score posts 0..1 of the 'insurance' collection on the local MongoDB.
tencent_sentiment(0, 1, 'insurance', 'localhost', 27017)
|
okuta/chainer | tests/chainer_tests/training_tests/triggers_tests/test_once_trigger.py | Python | mit | 6,615 | 0 | from __future__ import division
import numpy as np
import random
import tempfile
import unittest
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
@testing.parameterize(
# basic
{
'iter_per_epoch': 5, 'call_on_resume': False, 'resume': 4},
# call on resume
{
'iter_per_epoch': 5, 'call_on_resume': True, 'resume': 4},
# unaligned epoch
{
'iter_per_epoch': 2.5, 'call_on_resume': False, 'resume': 3},
# unaligned epoch, call on resume
{
'iter_per_epoch': 2.5, 'call_on_resume': True, 'resume': 3},
# tiny epoch
{
'iter_per_epoch': 0.5, 'call_on_resume': False, 'resume': 4},
# tiny epoch, call on resume
{
'iter_per_epoch': 0.5, 'call_on_resume': True, 'resume': 4},
)
class TestOnceTrigger(unittest.TestCase):
expected = [True] + [False] * 6
finished = [False] + [True] * 6
def setUp(self):
self.resumed_expected = [True] + [False] * 6
self.resumed_finished = [False] + [True] * 6
if self.call_on_resume:
self.resumed_expected[self.resume] = True
self.resumed_finished[self.resume] = False
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.expected, self.finished):
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
trainer.updater.update()
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.resumed_expected[:self.resume],
self.resumed_finished[:self.resume]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.resumed_expected[self.resume:],
self.resumed_finished[self.resume:]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
accumulated = False
accumulated_finished = True
for expected, finished in zip(self.expected, self.finished):
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
trainer.updater.update()
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
accumulated_finished = True
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.resumed_expected[:self.resume],
self.resumed_finished[:self.resume]):
trainer.updater.update()
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.resumed_expected[self.resume:],
self.resumed_finished[self.resume:]):
trainer.updater.update()
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
def test_resumed_trigger_backward_compat(self):
        """Loading an old snapshot without trigger state warns but still works."""
        trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            trigger = training.triggers.OnceTrigger(self.call_on_resume)
            for expected, finished in zip(self.resumed_expected[:self.resume],
                                          self.resumed_finished[:self.resume]):
                trainer.updater.update()
                self.assertEqual(trigger.finished, finished)
                self.assertEqual(trigger(trainer), expected)
            # old version does not save anything
            np.savez(f, dummy=0)
            trigger = training.triggers.OnceTrigger(self.call_on_resume)
            # Loading a snapshot that lacks the trigger's state must emit a
            # UserWarning instead of failing outright.
            with testing.assert_warns(UserWarning):
                serializers.load_npz(f.name, trigger)
            for expected, finished in zip(self.resumed_expected[self.resume:],
                                          self.resumed_finished[self.resume:]):
                trainer.updater.update()
                self.assertEqual(trigger.finished, finished)
                self.assertEqual(trigger(trainer), expected)
testing.run_module(__name__, __file__)
|
ldm5180/hammerhead | data-manager/client/bdmplot2/bdmplot2_callback.py | Python | lgpl-2.1 | 7,139 | 0.013727 | # Copyright (c) 2008-2010, Regents of the University of Colorado.
# This work was supported by NASA contracts NNJ05HE10G, NNC06CB40C, and
# NNC07CB47C.
# This library is free software. You can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, version 2.1 of the License.
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details. A copy of the GNU
# Lesser General Public License v 2.1 can be found in the file named
# "COPYING.LESSER". You should have received a copy of the GNU Lesser
# General Public License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA.
# You may contact the Automation Group at:
# bionet@bioserve.colorado.edu
# Dr. Kevin Gifford
# University of Colorado
# Engineering Center, ECAE 1B08
# Boulder, CO 80309
# Because BioNet was developed at a university, we ask that you provide
# attribution to the BioNet authors in any redistribution, modification,
# work, or article based on this library.
# You may contribute modifications or suggestions to the University of
# Colorado for the purpose of discussing and improving this software.
# Before your modifications are incorporated into the master version
# distributed by the University of Colorado, we must have a contributor
# license agreement on file from each contributor. If you wish to supply
# the University with your modifications, please join our mailing list.
# Instructions can be found on our website at
# http://bioserve.colorado.edu/developers-corner.
from bdm_client import *
from timespan import timeval_to_int
import time
# session_id -> session dict. Each session (per cb_datapoint's usage) holds
# 'resource' (subscribed resource-name patterns), 'bionet-resources' (names
# actually seen so far) and 'last requested' (last poll time; sessions idle
# for >600s are removed).
sessions = {}
# resource name -> {'datapoints': [...], 'sessions': {session_id: [...]}}
# cache of (timestamp, value-string) datapoints received for that resource.
bionet_resources = {}
#callbacks
def cb_lost_bdm(bdm, user_data):
    """BDM-lost callback: intentionally a no-op for this plotter.

    The original body was a bare ``None`` expression statement; ``pass`` is
    the idiomatic way to spell an empty function body.
    """
    pass
def cb_new_bdm(bdm, user_data):
    """BDM-discovered callback: intentionally a no-op for this plotter.

    The original body was a bare ``None`` expression statement; ``pass`` is
    the idiomatic way to spell an empty function body.
    """
    pass
def cb_lost_hab(hab, user_data):
    """Clear cached user data on every resource of a HAB that disappeared."""
    node_total = bionet_hab_num_nodes(hab)
    for node_index in range(node_total):
        node = bionet_hab_get_node_by_index(hab, node_index)
        resource_total = bionet_node_get_num_resources(node)
        for resource_index in range(resource_total):
            resource = bionet_node_get_resource_by_index(node, resource_index)
            pybionet_set_user_data(resource, None)
    print("lost hab: " + bionet_hab_get_type(hab) + "." + bionet_hab_get_id(hab))
def cb_new_hab(hab, user_data):
    """Announce a newly discovered HAB on stdout."""
    hab_name = bionet_hab_get_type(hab) + "." + bionet_hab_get_id(hab)
    print("new hab: " + hab_name)
def cb_new_node(node, user_data):
    """Print a summary of a newly discovered node: its resources (with their
    latest datapoint, if any) and its streams."""
    hab = bionet_node_get_hab(node)
    print("new node: " + bionet_node_get_name(node))
    if (bionet_node_get_num_resources(node)):
        print("    Resources:")
        for i in range(bionet_node_get_num_resources(node)):
            resource = bionet_node_get_resource_by_index(node, i)
            # Datapoint index 0 is the most recently known value (if any).
            datapoint = bionet_resource_get_datapoint_by_index(resource, 0)
            if (datapoint == None):
                print("        " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + bionet_resource_get_id(resource) + ": (no known value)")
            else:
                value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint));
                #%s %s %s = %s @ %s
                print("        " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + bionet_resource_get_id(resource) + " = " + value_str + " @ " + bionet_datapoint_timestamp_to_string(datapoint))
    if (bionet_node_get_num_streams(node)):
        print("    Streams:")
        for i in range(bionet_node_get_num_streams(node)):
            stream = bionet_node_get_stream_by_index(node, i)
            print("        " + bionet_stream_get_id(stream) + " " + bionet_stream_get_type(stream) + " " + bionet_stream_direction_to_string(bionet_stream_get_direction(stream)))
def cb_lost_node(node, userdata):
    """Drop cached user data from every resource of a node that went away."""
    hab = bionet_node_get_hab(node)
    resource_total = bionet_node_get_num_resources(node)
    for resource_index in range(resource_total):
        resource = bionet_node_get_resource_by_index(node, resource_index)
        pybionet_set_user_data(resource, None)
    print("lost node: " + bionet_node_get_name(node))
def cb_datapoint(datapoint, userdata):
    """Record an incoming datapoint into the per-resource cache for every
    session whose subscription patterns match, and expire idle sessions.

    Side effects: mutates the module-level `sessions` and `bionet_resources`
    dicts; sessions not polled for more than 600 seconds are deleted.
    """
    value = bionet_datapoint_get_value(datapoint);
    resource = bionet_value_get_resource(value);
    node = bionet_resource_get_node(resource);
    hab = bionet_node_get_hab(node);
    value_str = bionet_value_to_str(value);
    #"%s.%s.%s:%s = %s %s %s @ %s"
    #print(bionet_resource_get_name(resource) + " = " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + value_str + " @ " + bionet_datapoint_timestamp_to_string(datapoint))
    now = time.time()
    removal = []
    resource_name = bionet_resource_get_name(resource)
    # Each cached datapoint is a (integer timestamp, value string) pair.
    dp = (timeval_to_int(bionet_datapoint_get_timestamp(datapoint)), value_str)
    for session_id, session in sessions.iteritems():
        found = False
        # session['resource'] holds subscription patterns; check whether this
        # resource's full name matches any of them.
        for r in session['resource']:
            if (bionet_resource_name_matches(resource_name, r)):
                # Known resource for this session: append to its cache.
                for name in session['bionet-resources']:
                    if (name == resource_name):
                        u = bionet_resources[name]
                        if (None == u) or ('datapoints' not in u) or ('sessions' not in u): # no user data is set yet
                            u = { 'datapoints' : [ dp ], 'sessions' : { session_id : [ dp ] } }
                            bionet_resources[name] = u
                            print "Added datapoint to new user data"
                        else: # user data is set, just append to it
                            u['datapoints'].append(dp)
                            if session_id in u['sessions']:
                                u['sessions'][session_id].append(dp)
                            else:
                                u['sessions'][session_id] = [ dp ]
                            #print "Added datapoint to existing user data"
                        found = True
                # First datapoint for a matching-but-unseen resource: register
                # the resource with the session and start a fresh cache entry.
                if (False == found):
                    session['bionet-resources'].append(resource_name)
                    u = { 'datapoints' : [ dp ], 'sessions' : { session_id : [ dp ] } }
                    bionet_resources[resource_name] = u
                    #print "Added datapoint to new user data of new resource"
        if (now > (session['last requested'] + 600)):
            # this session hasn't been requested in more than 10 minutes. remove it
            removal.append(session_id)
    # Delete idle sessions only after iteration to avoid mutating the dict
    # while it is being iterated.
    for session_id in removal:
        #print "removed subscription ", sessions[session_id]['resource']
        del sessions[session_id]
        # TODO: unsubscribe when bdm_unsubscribe() is implemented
# TODO: unsubscribe when bdm_unsubscribe() is implemented
|
facebookexperimental/eden | eden/scm/edenscm/mercurial/hgweb/request.py | Python | gpl-2.0 | 5,622 | 0.000534 | # Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# hgweb/request.py - An http request from either CGI or the standalone server.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import cgi
import errno
import socket
from .. import pycompat, util
from .common import HTTP_NOT_MODIFIED, ErrorResponse, statusmessage
# Abbreviated URL query parameters and the full (key, value) pairs each one
# expands to. A value of None means "reuse the shortcut parameter's own
# value" (see normalize() below).
shortcuts = {
    "cl": [("cmd", ["changelog"]), ("rev", None)],
    "sl": [("cmd", ["shortlog"]), ("rev", None)],
    "cs": [("cmd", ["changeset"]), ("node", None)],
    "f": [("cmd", ["file"]), ("filenode", None)],
    "fl": [("cmd", ["filelog"]), ("filenode", None)],
    "fd": [("cmd", ["filediff"]), ("node", None)],
    "fa": [("cmd", ["annotate"]), ("filenode", None)],
    "mf": [("cmd", ["manifest"]), ("manifest", None)],
    "ca": [("cmd", ["archive"]), ("node", None)],
    "tags": [("cmd", ["tags"])],
    "tip": [("cmd", ["changeset"]), ("node", ["tip"])],
    "static": [("cmd", ["static"]), ("file", None)],
}
def normalize(form):
    """Expand URL shortcuts in *form* and strip whitespace from all values.

    Mutates and returns the same dict.
    """
    # Replace each recognized shortcut key with the parameters it stands for;
    # a None value means "carry over the shortcut parameter's own value".
    for shortcut in shortcuts:
        if shortcut in form:
            for name, value in shortcuts[shortcut]:
                form[name] = form[shortcut] if value is None else value
            del form[shortcut]
    # Strip surrounding whitespace from every remaining value.
    for key, values in pycompat.iteritems(form):
        form[key] = [item.strip() for item in values]
    return form
class wsgirequest(object):
    """Higher-level API for a WSGI request.
    WSGI applications are invoked with 2 arguments. They are used to
    instantiate instances of this class, which provides higher-level APIs
    for obtaining request parameters, writing HTTP output, etc.
    """
    def __init__(self, wsgienv, start_response):
        version = wsgienv[r"wsgi.version"]
        # Only WSGI 1.x is supported.
        if (version < (1, 0)) or (version >= (2, 0)):
            raise RuntimeError("Unknown and unsupported WSGI version %d.%d" % version)
        self.inp = wsgienv[r"wsgi.input"]  # request body stream
        self.err = wsgienv[r"wsgi.errors"]  # server-provided error stream
        self.threaded = wsgienv[r"wsgi.multithread"]
        self.multiprocess = wsgienv[r"wsgi.multiprocess"]
        self.run_once = wsgienv[r"wsgi.run_once"]
        self.env = wsgienv
        # Parsed request parameters, with shortcuts expanded and values
        # whitespace-stripped (see normalize()).
        self.form = normalize(cgi.parse(self.inp, self.env, keep_blank_values=1))
        self._start_response = start_response
        # Callable returned by start_response(); set by respond().
        self.server_write = None
        # Response headers accumulated before respond() flushes them.
        self.headers = []
    def __iter__(self):
        return iter([])
    def read(self, count=-1):
        return self.inp.read(count)
    def drain(self):
        """need to read all data from request, httplib is half-duplex"""
        length = int(self.env.get("CONTENT_LENGTH") or 0)
        for s in util.filechunkiter(self.inp, limit=length):
            pass
    def respond(self, status, type, filename=None, body=None):
        """Start the HTTP response (once) and optionally write *body*."""
        if not isinstance(type, str):
            # NOTE(review): this branch is a no-op ('type = type'); it looks
            # like a str conversion was intended here — confirm against
            # upstream before changing.
            type = type
        # start_response() may only be called once; afterwards it is None.
        if self._start_response is not None:
            self.headers.append((r"Content-Type", type))
            if filename:
                # Keep only the basename and escape it for a quoted header.
                filename = (
                    filename.rpartition("/")[-1]
                    .replace("\\", "\\\\")
                    .replace('"', '\\"')
                )
                self.headers.append(
                    ("Content-Disposition", 'inline; filename="%s"' % filename)
                )
            if body is not None:
                self.headers.append((r"Content-Length", str(len(body))))
            for k, v in self.headers:
                if not isinstance(v, str):
                    raise TypeError("header value must be string: %r" % (v,))
            if isinstance(status, ErrorResponse):
                self.headers.extend(status.headers)
                if status.code == HTTP_NOT_MODIFIED:
                    # RFC 2616 Section 10.3.5: 304 Not Modified has cases where
                    # it MUST NOT include any headers other than these and no
                    # body
                    self.headers = [
                        (k, v)
                        for (k, v) in self.headers
                        if k in ("Date", "ETag", "Expires", "Cache-Control", "Vary")
                    ]
                status = statusmessage(status.code, str(status))
            elif status == 200:
                status = "200 Script output follows"
            elif isinstance(status, int):
                status = statusmessage(status)
            self.server_write = self._start_response(status, self.headers)
            self._start_response = None
            self.headers = []
        if body is not None:
            self.write(body)
            self.server_write = None
    def write(self, thing):
        if thing:
            try:
                self.server_write(thing)
            except socket.error as inst:
                # A client disconnect (ECONNRESET) is expected and ignored;
                # any other socket error is re-raised.
                if inst.errno != errno.ECONNRESET:
                    raise
    def writelines(self, lines):
        for line in lines:
            self.write(line)
    def flush(self):
        return None
    def close(self):
        return None
def wsgiapplication(app_maker):
    """Wrap an application factory for compatibility with old CGI scripts.

    A plain hgweb() or hgwebdir() can and should now be used as a WSGI
    application directly; this helper instantiates the application once and
    forwards every (environ, start_response) pair to it.
    """
    app = app_maker()

    def run_wsgi(env, respond):
        return app(env, respond)

    return run_wsgi
|
pidydx/grr | grr/lib/email_alerts.py | Python | apache-2.0 | 5,328 | 0.006381 | #!/usr/bin/env python
"""A simple wrapper to send email alerts."""
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import logging
import re
import smtplib
import socket
from grr.lib import config_lib
from grr.lib import registry
from grr.lib.rdfvalues import standard as rdf_standard
class EmailAlerterBase(object):
  """The email alerter base class.

  Subclasses implement SendEmail(); the metaclass registers each subclass so
  it can be selected by name through the configuration (see EmailAlerterInit).
  """
  __metaclass__ = registry.MetaclassRegistry
  def RemoveHtmlTags(self, data):
    """Return *data* with all <...> HTML tags stripped (non-greedy match)."""
    p = re.compile(r"<.*?>")
    return p.sub("", data)
  def AddEmailDomain(self, address):
    """Append the configured Logging.domain to *address* if it has no domain."""
    suffix = config_lib.CONFIG["Logging.domain"]
    if isinstance(address, rdf_standard.DomainEmailAddress):
      address = str(address)
    if suffix and "@" not in address:
      return address + "@%s" % suffix
    return address
  def SplitEmailsAndAppendEmailDomain(self, address_list):
    """Splits a string of comma-separated emails, appending default domain."""
    result = []
    # Process email addresses, and build up a list.
    if isinstance(address_list, rdf_standard.DomainEmailAddress):
      address_list = [str(address_list)]
    elif isinstance(address_list, basestring):
      address_list = [address for address in address_list.split(",") if address]
    for address in address_list:
      result.append(self.AddEmailDomain(address))
    return result
  def SendEmail(self,
                to_addresses,
                from_address,
                subject,
                message,
                attachments=None,
                is_html=True,
                cc_addresses=None,
                message_id=None,
                headers=None):
    """Send an email notification. Must be implemented by subclasses."""
    raise NotImplementedError()
class SMTPEmailAlerter(EmailAlerterBase):
  """Email alerter that delivers mail through the configured SMTP server."""
  def SendEmail(self,
                to_addresses,
                from_address,
                subject,
                message,
                attachments=None,
                is_html=True,
                cc_addresses=None,
                message_id=None,
                headers=None):
    """This method sends an email notification.
    Args:
      to_addresses: blah@mycompany.com string, list of addresses as csv string,
        or rdf_standard.DomainEmailAddress
      from_address: blah@mycompany.com string
      subject: email subject string
      message: message contents string, as HTML or plain text
      attachments: iterable of filename string and file data tuples,
        e.g. {"/file/name/string": filedata}
      is_html: true if message is in HTML format
      cc_addresses: blah@mycompany.com string, or list of addresses as
        csv string
      message_id: smtp message_id. Used to enable conversation threading
      headers: dict of str-> str, headers to set
    Raises:
      RuntimeError: for problems connecting to smtp server.
    """
    headers = headers or {}
    msg = MIMEMultipart("alternative")
    # For HTML mail, attach a plain-text alternative first (tags stripped)
    # so clients that cannot render HTML still show something readable.
    if is_html:
      text = self.RemoveHtmlTags(message)
      part1 = MIMEText(text, "plain")
      msg.attach(part1)
      part2 = MIMEText(message, "html")
      msg.attach(part2)
    else:
      part1 = MIMEText(message, "plain")
      msg.attach(part1)
    if attachments:
      # Each attachment is sent base64-encoded as application/octet-stream.
      for file_name, file_data in attachments.iteritems():
        part = MIMEBase("application", "octet-stream")
        part.set_payload(file_data)
        encoders.encode_base64(part)
        part.add_header("Content-Disposition",
                        "attachment; filename=\"%s\"" % file_name)
        msg.attach(part)
    msg["Subject"] = subject
    # Normalize addresses: append the default domain where missing.
    from_address = self.AddEmailDomain(from_address)
    to_addresses = self.SplitEmailsAndAppendEmailDomain(to_addresses)
    cc_addresses = self.SplitEmailsAndAppendEmailDomain(cc_addresses or "")
    msg["From"] = from_address
    msg["To"] = ",".join(to_addresses)
    if cc_addresses:
      msg["CC"] = ",".join(cc_addresses)
    if message_id:
      msg.add_header("Message-ID", message_id)
    for header, value in headers.iteritems():
      msg.add_header(header, value)
    try:
      s = smtplib.SMTP(config_lib.CONFIG["Worker.smtp_server"],
                       int(config_lib.CONFIG["Worker.smtp_port"]))
      s.ehlo()
      # Upgrade to TLS and authenticate only when configured to do so.
      if config_lib.CONFIG["Worker.smtp_starttls"]:
        s.starttls()
        s.ehlo()
      if (config_lib.CONFIG["Worker.smtp_user"] and
          config_lib.CONFIG["Worker.smtp_password"]):
        s.login(config_lib.CONFIG["Worker.smtp_user"],
                config_lib.CONFIG["Worker.smtp_password"])
      s.sendmail(from_address, to_addresses + cc_addresses, msg.as_string())
      s.quit()
    except (socket.error, smtplib.SMTPException) as e:
      raise RuntimeError("Could not connect to SMTP server to send email. "
                         "Please check config option Worker.smtp_server. "
                         "Currently set to %s. Error: %s" %
                         (config_lib.CONFIG["Worker.smtp_server"], e))
EMAIL_ALERTER = None
class EmailAlerterInit(registry.InitHook):
  """Startup hook that instantiates the configured email alerter class."""

  def RunOnce(self):
    # Resolve the alerter plugin named in the server config and publish a
    # singleton instance in the module-level EMAIL_ALERTER.
    global EMAIL_ALERTER
    alerter_name = config_lib.CONFIG["Server.email_alerter_class"]
    logging.debug("Using email alerter: %s", alerter_name)
    alerter_cls = EmailAlerterBase.GetPlugin(alerter_name)
    EMAIL_ALERTER = alerter_cls()
|
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/add_mesh_BoltFactory/presets/M5.py | Python | gpl-3.0 | 795 | 0.003774 | props.bf_Shank_Dia = 5.0
#props.bf_Pitch = 0.8 # Coarse
props.bf_Pitch = 0.5 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 5.0
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * pro | ps.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 8.0
props.bf_Hex_Head_Height = 3.5
props.bf_Cap_Head_Dia = 8.5
props.bf_Cap_Head_Height = 5.0
props.bf_CounterSink_Head_Dia = 10.4
props.bf_Allen_Bit_Flat_Distance = 4.0
props.bf_Allen_Bit_Depth = 2.5
props.bf_Pan_Head_Dia = 9.5
props.bf_Dome_Head_Dia = 9.5
props.b | f_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 4.0
props.bf_Hex_Nut_Flat_Distance = 8.0
props.bf_Thread_Length = 10
props.bf_Shank_Length = 0.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.