text stringlengths 4 1.02M | meta dict |
|---|---|
"""Operators that interact with Google Cloud Life Sciences service."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.life_sciences import LifeSciencesHook
from airflow.providers.google.cloud.links.life_sciences import LifeSciencesLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class LifeSciencesRunPipelineOperator(BaseOperator):
    """
    Runs a Life Sciences Pipeline.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:LifeSciencesRunPipelineOperator`

    :param body: The request body.
    :param location: The location of the project.
    :param project_id: ID of the Google Cloud project; if None then the
        default project_id from the hook's connection is used.
    :param gcp_conn_id: The connection ID to use to connect to Google Cloud.
    :param api_version: API version used (for example v2beta).
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "body",
        "gcp_conn_id",
        "api_version",
        "impersonation_chain",
    )
    operator_extra_links = (LifeSciencesLink(),)

    def __init__(
        self,
        *,
        body: dict,
        location: str,
        project_id: str | None = None,
        gcp_conn_id: str = "google_cloud_default",
        api_version: str = "v2beta",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.body = body
        self.location = location
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.impersonation_chain = impersonation_chain
        # Validate only after every attribute has been assigned so future
        # checks may safely inspect any field of the operator.
        self._validate_inputs()

    def _validate_inputs(self) -> None:
        """Fail fast on missing required parameters."""
        if not self.body:
            raise AirflowException("The required parameter 'body' is missing")
        if not self.location:
            raise AirflowException("The required parameter 'location' is missing")

    def execute(self, context: Context) -> dict:
        """Run the pipeline and return the Life Sciences API response."""
        hook = LifeSciencesHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        # Resolve the effective project once so the persisted link and the
        # actual pipeline run always refer to the same project.
        project_id = self.project_id or hook.project_id
        if project_id:
            LifeSciencesLink.persist(
                context=context,
                task_instance=self,
                project_id=project_id,
            )
        return hook.run_pipeline(body=self.body, location=self.location, project_id=project_id)
| {
"content_hash": "edb3398862081977d326ab5a3126c15d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 100,
"avg_line_length": 38.63953488372093,
"alnum_prop": 0.6602467649714113,
"repo_name": "apache/airflow",
"id": "b549d2f786e66bf6797a3c9d904da70fae693638",
"size": "4110",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/operators/life_sciences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
import subprocess
import shutil, sys, os
import xml.etree.ElementTree as ET
sys.path.append(os.getcwd())
from mesonbuild import coredata
from packaging.createmsi import get_modules
class PkgGenerator:
    """Build the macOS installer package (.pkg) for Meson.

    Bundles meson with PyInstaller, stages it plus a ninja binary under a
    fake install root, then wraps everything with pkgbuild/productbuild.
    """

    def __init__(self):
        self.pkg_dir = 'macpkg'
        self.sharedir = os.path.join(self.pkg_dir, 'usr/local/share')
        self.bindir = os.path.join(self.pkg_dir, 'usr/local/bin')
        self.product_name = 'Meson Build System'
        self.identifier = 'com.mesonbuild.meson'
        # Drop any 'dev' marker from the version string used in file names.
        self.version = coredata.version.replace('dev', '')
        self.mesonstashdir = os.path.join(self.sharedir, f'meson-{self.version}')
        self.pkgname = 'meson.pkg'  # was an f-string with no placeholders
        self.productname = f'meson-{self.version}.pkg'
        self.distribution_file = 'meson-distribution.xml'
        self.resourcedir = 'packaging/macpages'

    def build_dist(self):
        """Run PyInstaller and stage the result (plus a stripped ninja) under pkg_dir."""
        if os.path.exists(self.pkg_dir):
            shutil.rmtree(self.pkg_dir)
        os.mkdir(self.pkg_dir)
        # NOTE(review): hard-coded developer path; assumes this exact machine.
        pyinstaller_bin = '/Users/jpakkane/Library/Python/3.8/bin/pyinstaller'
        pyinst_cmd = [pyinstaller_bin,
                      '--clean',
                      '--distpath',
                      self.pkg_dir]
        for m in get_modules():
            pyinst_cmd += ['--hidden-import', m]
        pyinst_cmd += ['meson.py']
        subprocess.check_call(pyinst_cmd)
        tmpdir = os.path.join(self.pkg_dir, 'meson')
        shutil.move(tmpdir, self.mesonstashdir)
        os.makedirs(self.bindir)
        ln_base = os.path.relpath(self.mesonstashdir, self.bindir)
        ninja_bin = shutil.which('ninja')
        assert ninja_bin, 'ninja not found in PATH'
        shutil.copy(ninja_bin, self.bindir)
        subprocess.check_call(['strip', os.path.join(self.bindir, 'ninja')])
        os.symlink(os.path.join(ln_base, 'meson'), os.path.join(self.bindir, 'meson'))

    def build_package(self):
        """Create the component package, then the final product archive."""
        subprocess.check_call(['pkgbuild',
                               '--root',
                               self.pkg_dir,
                               '--identifier',
                               self.identifier,
                               self.pkgname])
        self.generate_distribution()
        subprocess.check_call(['productbuild',
                               '--distribution',
                               self.distribution_file,
                               '--resources',
                               self.resourcedir,
                               self.productname])

    def generate_distribution(self):
        """Write the productbuild distribution XML describing the installer UI."""
        root = ET.Element('installer-gui-script', {'minSpecVersion': '1'})
        ET.SubElement(root, 'welcome', {'file': 'welcome.html',
                                        'mime-type': 'text/html'})
        ET.SubElement(root, 'license', {'file': 'license.html',
                                        'mime-type': 'text/html'})
        ET.SubElement(root, 'conclusion', {'file': 'conclusion.html',
                                           'mime-type': 'text/html'})
        ET.SubElement(root, 'pkg-ref', {'id': self.identifier})
        # Fixed typo: 'hostArhcitectures' -> 'hostArchitectures' so the
        # installer actually advertises x86_64/arm64 support.
        ET.SubElement(root, 'options', {'customize': 'never',
                                        'require-scripts': 'false',
                                        'hostArchitectures': 'x86_64,arm64'})
        choices_outline = ET.SubElement(root, 'choices-outline')
        line = ET.SubElement(choices_outline, 'line', {'choice': 'default'})
        ET.SubElement(line, 'line', {'choice': self.identifier})
        ET.SubElement(root, 'choice', {'id': 'default'})
        choice = ET.SubElement(root, 'choice', {'id': self.identifier, 'visible': 'false'})
        ET.SubElement(choice, 'pkg-ref', {'id': self.identifier})
        ET.SubElement(root, 'pkg-ref', {'id': self.identifier,
                                        'version': '0',  # self.version,
                                        'onConclusion': 'none'}).text = self.pkgname
        ET.ElementTree(root).write(self.distribution_file, encoding='utf-8', xml_declaration=True)
        # ElementTree can not do prettyprinting so do it manually
        import xml.dom.minidom
        doc = xml.dom.minidom.parse(self.distribution_file)
        with open(self.distribution_file, 'w') as open_file:
            open_file.write(doc.toprettyxml())
if __name__ == '__main__':
    # Must be run from the source root so meson.py and packaging/ resolve.
    if not os.path.exists('meson.py'):
        # Was sys.exit(print(...)): print returns None, so the script exited
        # with status 0 on this error path. Passing the string prints it to
        # stderr and exits with status 1.
        sys.exit('Run me in the top level source dir.')
    # Ensure a current PyInstaller is available for build_dist().
    subprocess.check_call(['pip3', 'install', '--user', '--upgrade', 'pyinstaller'])
    pg = PkgGenerator()
    pg.build_dist()
    pg.build_package()
| {
"content_hash": "75b9e6901543406115a4fd43c7f7919d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 98,
"avg_line_length": 45.00990099009901,
"alnum_prop": 0.5415750109986801,
"repo_name": "QuLogic/meson",
"id": "ab3c51c6e6553e6ee7613e25f1eb44ab9483dade",
"size": "5166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packaging/createpkg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4862"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "196268"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "59203"
},
{
"name": "CMake",
"bytes": "35279"
},
{
"name": "Cuda",
"bytes": "10458"
},
{
"name": "D",
"bytes": "5313"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "12020"
},
{
"name": "Genie",
"bytes": "477"
},
{
"name": "HTML",
"bytes": "897"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2900"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "187"
},
{
"name": "Limbo",
"bytes": "28"
},
{
"name": "Meson",
"bytes": "527053"
},
{
"name": "Objective-C",
"bytes": "688"
},
{
"name": "Objective-C++",
"bytes": "381"
},
{
"name": "PowerShell",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "3598710"
},
{
"name": "Roff",
"bytes": "625"
},
{
"name": "Rust",
"bytes": "3192"
},
{
"name": "Shell",
"bytes": "10416"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim Script",
"bytes": "9743"
},
{
"name": "Yacc",
"bytes": "103"
}
],
"symlink_target": ""
} |
import discord
from discord.ext import commands
import time
import asyncio
import unicodedata
import cogs.emojis as Emojis
import cogs.glyphs as Glyphs
import inflect
import upsidedown
import datetime
from collections import Counter
class Fun():
    """Assorted fun/utility commands for a Discord self-bot.

    NOTE(review): written against a pre-1.0 discord.py API (cogs without
    ``commands.Cog``, ``change_presence(game=...)``, ``channel.get_message``,
    ``user.profile()``) — confirm the library version before modernizing.
    """

    def __init__(self, bot):
        self.bot = bot

    def texttoemoji(self, text: str = None):
        # Render text as Discord emoji: digits -> keycap emoji,
        # letters -> :regional_indicator_x:, everything else passed through.
        if not text:
            return
        text = text.lower()
        msg = ""
        p = inflect.engine()  # NOTE(review): unused in this method
        chars = list(text)
        for char in chars:
            if char.isdigit():
                msg += f"{char}\u20e3"
            elif char.isalpha():
                msg += f":regional_indicator_{char}:"
                # " ".join([" " if x==" " else ":regional_indicator_{}:".format(x) for x in "hm hm"])
            elif char == " ":
                msg += " "
            else:
                msg += char
        return msg

    def upsidedown(self, text: str):
        # Delegate to the `upsidedown` library (method name shadows the module).
        return upsidedown.transform(text)

    @commands.command()
    async def ping(self, ctx):
        # Time a websocket ping and report the round trip in milliseconds.
        before = time.monotonic()
        await (await self.bot.ws.ping())
        after = time.monotonic()
        pingT = (after - before) * 1000
        await ctx.message.edit(content="Ping. :ping_pong:")
        await ctx.send(content="Pong. :ping_pong: **{0:.0f}ms**".format(pingT))

    @commands.command()
    async def status(self, ctx, *, status: str):
        # Set the account's "Playing" status to the given text.
        status = status.strip("`")
        await self.bot.change_presence(game=discord.Game(name=status))
        await asyncio.sleep(1)
        await ctx.message.edit(content=f"**Playing** {ctx.guild.me.game}")

    @commands.command()
    async def stat_test(self, ctx):
        # Clear the account's "Playing" status.
        await self.bot.change_presence(game=None)
        await ctx.message.edit(content="Playing set to None")

    @commands.command()
    async def charinfo(self, ctx, *, characters: str):
        # NOTE(review): shadowed by the second `charinfo` definition at the
        # bottom of this class — this version never registers as a command.
        if len(characters) > 15:
            await ctx.send(self.bot.blank + f"Too many characters ({len(characters)}/15)")
            return
        fmt = "`\\U{0:>08}`: {1} - {2} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{0}>"
        def to_string(c):
            digit = format(ord(c), "x")
            name = unicodedata.name(c, "Name not found.")
            return fmt.format(digit, name, c)
        # NOTE(review): Message.edit takes keyword-only arguments; this
        # positional call would raise TypeError if it were ever invoked.
        await ctx.message.edit("\n".join(map(to_string, characters)))

    @commands.command(aliases=["ustatus"])
    async def cstatus(self, ctx, id=None):
        # Report another user's presence status, looked up by user ID.
        if not id: await ctx.message.edit(content="Type the ID!"); return
        try: id = int(id)
        except: await ctx.message.edit(content="Type the ID!"); return
        member = discord.utils.get(self.bot.get_all_members(), id=id)
        if not member:
            await ctx.message.edit(content=f"Can't find a user with the ID of {id}")
            return
        await ctx.message.edit(content=f"{str(member)}'s status is: {str(member.status).title()}")

    @commands.command()
    async def profile(self, ctx, *, arg = None):
        # Show a small profile embed; accepts a user ID or name#discriminator,
        # defaulting to the invoking author.
        if not arg: arg = str(ctx.author)
        Int = arg.isdigit()
        if Int:
            id = int(arg)
            member = discord.utils.get(ctx.guild.members, id=id)
            if not member:
                await ctx.message.edit(content=f"Could not find the user with the ID of `{arg}` "
                                       f"on the server `{ctx.guild.name}`")
                return
        elif not Int:
            # await ctx.send(self.bot.blank + "{0}, {1}".format(arg.split("#")[0], int(arg.split("#")[1])))
            member = discord.utils.get(ctx.guild.members, name = arg.split("#")[0], discriminator = arg.split("#")[1])
            if not member:
                await ctx.message.edit(content=f"Could not find the user `{arg.split('#')[0]}` "
                                       f"on the server `{ctx.guild.name}`")
                return
            id = member.id
        else:
            # NOTE(review): unreachable — `Int` is a bool, so one of the two
            # branches above always runs.
            await ctx.send(self.bot.blank + "Type check not working or float given.")
            return
        embed = discord.Embed(description=f"Profile for {str(member)}", colour=member.colour)
        embed.add_field(name="Profile Link", value=f"<@{id}>")
        await ctx.message.edit(content="", embed=embed)

    @commands.command(aliases=["emojis", "emote", "emotes"])
    async def emoji(self, ctx, emoji: str = None, edit = True):
        # Look up a custom emote string by name in cogs.emojis; with no
        # argument, list every available name.
        if not emoji:
            allEmojis = "`"+"`, `".join(Emojis.emojis.keys())+"`"
            await ctx.message.edit(content=f"All available emotes are: {allEmojis}")
            return
        if not emoji.lower() in Emojis.emojis:
            await ctx.message.edit(content=f"Can't find the emoji `{emoji}`.")
            return
        emoji = emoji.lower()
        final = Emojis.emojis[emoji]
        if edit:
            await ctx.message.edit(content=final)
        else:
            await ctx.send(final)

    @commands.command()
    async def channels(self, ctx):
        # List every text channel of the current guild (title-cased).
        channels = []
        for channel in ctx.guild.text_channels:
            channels.append(channel.name.title())
        await ctx.message.edit(content=self.bot.blank + f"All text channels on the server "
                               f"`{ctx.guild.name}`: `" + "`, `".join(channels) + "`")

    @commands.command()
    async def roles(self, ctx):
        # List every role of the current guild.
        roles = []
        for role in ctx.guild.roles:
            roles.append(role.name)
        await ctx.message.edit(content=self.bot.blank + f"All roles on the server `{ctx.guild.name}`: " + "`" + "`, `".join(roles)+"`")

    @commands.command()
    async def emojitext(self, ctx, *, text: str = None):
        # Replace the invoking message with its emoji-letter rendering.
        msg = self.texttoemoji(text)
        if not msg:
            await ctx.send(self.bot.blank + "No Text!")
            return
        await ctx.message.edit(content=msg)

    @commands.command(enabled=False)
    async def react(self, ctx, channel: discord.TextChannel = None, id: int = None, *, text: str = None):
        # Disabled/unfinished: intended to react to a message with emoji letters.
        if not channel:
            await ctx.send(self.bot.blank + "No Channel")
            return
        if not id:
            await ctx.send(self.bot.blank + "No Message ID")
            return
        if not text:
            await ctx.send(self.bot.blank + "Text?")
            # NOTE(review): missing `return` here — execution continues with
            # text=None, so texttoemoji() yields None and the branch below runs.
        message = channel.get_message(id)  # NOTE(review): result never used
        msg = self.texttoemoji(text)
        if not msg:
            await ctx.send(self.bot.blank + "No `msg` var")
            return

    @commands.command(name="upsidedown")
    async def _upsidedown(self, ctx, *, text: str):
        # Send the given text flipped upside down.
        await ctx.send(self.upsidedown(text))

    @commands.command()
    async def quick(self, ctx, *, message = None):
        # Send a message, then immediately delete both it and the invocation.
        if not message: return
        message = message.strip("`")
        if "@" in message:
            pass
        msg = await ctx.send(message)
        await ctx.message.delete()
        await msg.delete()

    @commands.command()
    async def quick_mention(self, ctx, id = None):
        # Briefly ping a user by ID, then delete both messages.
        if not id or not id.isdigit():
            return
        id = int(id)
        user = self.bot.get_user(id)
        if not user:
            await ctx.message.edit(content="Can't find that user")
            return
        msg = await ctx.send(user.mention)
        await ctx.message.delete()
        await msg.delete()

    @commands.command(aliases=["picture", "photo"])
    async def pic(self, ctx, *, url):
        # Replace the invocation with an embed showing the linked image.
        embed = discord.Embed(title="Picture", url=url)
        embed.set_image(url=url)
        try: await ctx.message.edit(content="", embed=embed)
        except: await ctx.send("Can't find that link.")

    @commands.command(aliases=["mutualguilds", "mutualg", "mutuals"])
    async def mutualservers(self, ctx, user: discord.User = None, list = False):
        # Count guilds shared with a user; `user.profile()` is a
        # user-account-only endpoint. `list` shadows the builtin.
        if not user:
            await ctx.send("Give the user!")
            return
        profile = await user.profile()
        amount = len(profile.mutual_guilds)
        embed = discord.Embed(title=f"Amount of mutual guilds for {user}", description=f"Amount of mutual guilds with "
                              f"{user.mention}")
        embed.add_field(name="Mutual Guilds", value=str(amount))
        if list:
            listGuilds = ", ".join(x.name for x in profile.mutual_guilds)
            embed.add_field(name="List of Mutual Guilds", value=listGuilds)
        embed.set_footer(text=("Mutual Guilds since " + datetime.datetime.utcnow().strftime("%A %d %B %Y at %H:%M:%S")))
        await ctx.send(embed=embed)

    # @commands.command()
    # async def highestmutual(self, ctx):
    #     top = ["None", 0]
    #     for member in self.bot.get_all_members():
    #         profile = await member.profile()
    #         amount = len(profile.mutual_guilds)
    #
    #         if amount > top[1]:
    #             top = [str(member), amount]
    #
    #         elif amount == top[1]:
    #             pass
    #
    #     await ctx.send(f"The person with the most mutual guilds with {self.bot.user} is {top[0]} at {top[1]}")

    @commands.command(aliases=["mutualhighest", "highestmutuals", "mutualleaderboard"])
    async def highestmutual(self, ctx):
        # Leaderboard of non-bot users seen in the most shared servers.
        try: await ctx.message.add_reaction("\u2705")
        except discord.Forbidden: pass
        # Counter keys are display names (backticks escaped for markdown);
        # duplicates across guilds become the per-user counts.
        members = Counter(str(m).replace("`", "\\`") for m in self.bot.get_all_members() if m.bot is False)
        top = members.most_common(11)[1:]  # Remove Myself
        result = []
        for index, (member, count) in enumerate(top, 1):
            if index != 10:
                result.append("{}\u20e3: {} ({} servers)".format(index, member, count))
            else:
                result.append("\U0001f51f: {} ({} servers)".format(member, count))
        message = "\n".join(result)
        await ctx.send("Leaderboard for mutual servers\n" + message)
        await ctx.message.delete()

    @commands.command(aliases=["glyphs"])
    async def glyph(self, ctx, *, glyph: str = None):
        # Show a glyph image by name from cogs.glyphs, or list all names.
        if not glyph:
            allGlyphs = "`" + "`, `".join(Glyphs.glyphs.keys()) + "`"
            await ctx.send(content=f"All available glyphs are: {allGlyphs}")
            return
        if not glyph.upper() in Glyphs.glyphs:
            await ctx.send(content=f"Can't find the glyph `{glyph}`.")
            return
        glyph = glyph.upper()
        url = Glyphs.glyphs[glyph]
        embed = discord.Embed(title=f"{glyph.upper()}")
        embed.set_image(url=url)
        await ctx.send(embed=embed)

    @commands.command()
    async def charinfo(self, ctx, *, characters: str):
        """Gives unicode info on an emoji."""
        # Active definition (shadows the earlier `charinfo` above).
        if len(characters) > 15:
            await ctx.send(self.bot.blank + f"Too many characters ({len(characters)}/15)")
            return
        fmt = "`\\U{0:>08}`: {1} - {2} \N{EM DASH} <http://www.fileformat.info/info/unicode/char/{0}>"
        def to_string(c):
            digit = format(ord(c), "x")
            name = unicodedata.name(c, "Name not found.")
            return fmt.format(digit, name, c)
        await ctx.message.edit(content=self.bot.blank + "\n".join(map(to_string, characters)))
def setup(bot):
    """Extension entry point used by discord.py to register this cog."""
    cog = Fun(bot)
    bot.add_cog(cog)
# import discord
# user = discord.utils.get(ctx.guild.members, id=80088516616269824)
# if not user:
# await ctx.send(self.bot.blank + f"Can't find a user with the ID of {id}")
# return
# await ctx.send(self.bot.blank + f"{str(user)}'s status is: {str(user.status).title()}")
# blacklist = [
# "bots",
# "orangutan",
# "role troll",
# "v alliance leader",
# "itrust",
# "moderator",
# "server admin"
# ]
# roles = []
# for role in ctx.guild.roles:
# if role.hoist and role.name.lower() not in blacklist:
# roles.append(role.name)
#
# rolesS = "\n".join(roles)
# await ctx.send(f"```{rolesS}```")
#
# for member in ctx.guild.members:
# for role in member.roles:
# if role.name in roles:
# await member.add_roles(discord.utils.get(ctx.guild.roles, name = "Clan Member"))
#
# await ctx.send("Done")
#
# counter = 0
# for server in bot.guilds:
# try:
# await server.default_channel("Just to remind you, we have a Discord server: https://discord.gg/duRB6Qg")
# except discord.Forbidden:
# counter += 1
# await ctx.send(f"Done. Failed {counter} times.")
#
# for server in bot.guilds:
# try:
# async for message in server.default_channel.history(limit=200):
# if "Just to remind you, we have a Discord server" in message.content and message.id != ctx.message.id and ctx.author.id == 150750980097441792:
# try: await message.delete()
# except: continue
# except:
# continue | {
"content_hash": "d9a52b94177388693692783d18888882",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 160,
"avg_line_length": 35.84985835694051,
"alnum_prop": 0.5623073883840379,
"repo_name": "OrangutanGaming/OG_SelfBot",
"id": "e40497c4f57e5df15cf64dfb7fe0d6a92468c33e",
"size": "12655",
"binary": false,
"copies": "1",
"ref": "refs/heads/rewrite",
"path": "cogs/fun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51691"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the `status` column to the `text_text` table."""

    def forwards(self, orm):
        """Apply the migration: add `Text.status` (CharField, default 'pending')."""
        # Adding field 'Text.status'
        db.add_column(u'text_text', 'status',
                      self.gf('django.db.models.fields.CharField')(default='pending', max_length=32),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop `Text.status`."""
        # Deleting field 'Text.status'
        db.delete_column(u'text_text', 'status')

    # Frozen ORM state auto-generated by South — intentionally left untouched.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'text.text': {
            'Meta': {'object_name': 'Text'},
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'readable': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '32'}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }

    complete_apps = ['text']
"content_hash": "840046808c1498898e06dad4e7437b07",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 195,
"avg_line_length": 65.20547945205479,
"alnum_prop": 0.5529411764705883,
"repo_name": "blumug/texapi",
"id": "8750f74b737b817c47947ed0d8bd491ef896e071",
"size": "4784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text/migrations/0002_auto__add_field_text_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2709"
},
{
"name": "HTML",
"bytes": "640044"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "108002"
},
{
"name": "Ruby",
"bytes": "4967"
},
{
"name": "Shell",
"bytes": "3761"
}
],
"symlink_target": ""
} |
"""Qiskit pulse scheduling tests."""
| {
"content_hash": "3d7149ad8b2a00c99a1cfbae5b0e4052",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 37,
"alnum_prop": 0.7027027027027027,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "b56983c88a1e9b33dbcfc60199c782542cb89028",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/scheduler/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
"""This module provides a pretty printer for lucene query tree.
"""
from .tree import BaseOperation, BaseGroup, SearchField
class _StickMarker:
    """Sentinel placed between two list elements that must stay on one line."""

    def __len__(self):
        # Report zero length so the marker never adds to line-width accounting.
        return 0


# Shared sentinel: suppresses the newline between the elements it separates.
_STICK_MARKER = _StickMarker()
class Prettifier(object):
    """Class to generate a pretty printer.
    """

    def __init__(self, indent=4, max_len=80, inline_ops=False):
        """
        The pretty printer factory.

        :param int indent: number of spaces for indentation
        :param int max_len: maximum line length in number of characters.
          Prettifier will do its best to keep inside those margins,
          but as it can only split on operators, it may not be possible.
        :param bool inline_ops: if False (default) operators are printed on a new line
          if True, operators are printed at the end of the line.
        """
        self.indent = indent
        self.prefix = " " * self.indent
        self.max_len = max_len
        self.inline_ops = inline_ops

    def _get_chains(self, element, parent=None):
        """return a list of string and list, and recursively

        An inner list represent a level of indentation
        A string is information from the level
        """
        if isinstance(element, BaseOperation):
            if not isinstance(parent, BaseOperation) or element.op == parent.op:
                # same level, this is just associativity
                num_children = len(element.children)
                for n, child in enumerate(element.children):
                    yield from self._get_chains(child, element)
                    # emit the operator between children, never after the last
                    if n < num_children - 1:
                        if self.inline_ops:
                            yield _STICK_MARKER
                        if element.op:
                            yield element.op
            else:
                # another operation, raise level
                new_level = []
                num_children = len(element.children)
                for n, child in enumerate(element.children):
                    new_level.extend(self._get_chains(child, element))
                    if n < num_children - 1:
                        if self.inline_ops:
                            new_level.append(_STICK_MARKER)
                        if element.op:
                            new_level.append(element.op)
                yield new_level
        elif isinstance(element, BaseGroup):
            # raise level: group contents become a nested list between parens
            yield "("
            yield list(self._get_chains(element.expr, element))
            if self.inline_ops:
                yield _STICK_MARKER
            yield ")"
        elif isinstance(element, SearchField):
            # use recursion on sub expression; the name sticks to its expression
            yield element.name + ":"
            yield _STICK_MARKER
            yield from self._get_chains(element.expr, element)
        else:
            # simple element
            yield str(element)

    def _count_chars(self, element):
        """Replace each element by the element and a count of chars in it (and recursively)

        This will help compute if elements can stand on a line or not
        """
        if isinstance(element, list):
            with_counts = [self._count_chars(c) for c in element]
            # when counting we add a space for joining
            return with_counts, sum(n + 1 for c, n in with_counts) - 1
        else:
            return element, len(element)

    def _apply_stick(self, elements):
        # Merge elements separated by _STICK_MARKER into single strings,
        # yielding all other elements unchanged.
        last = None
        sticking = False
        for current in elements:
            if current == _STICK_MARKER:
                assert last is not None, "_STICK_MARKER should never be first !"
                sticking = True
            elif sticking:
                # glue current onto the previous element with a single space
                last += " " + current
                sticking = False
            else:
                if last is not None:
                    yield last
                last = current
        yield last

    def _concatenates(self, chain_with_counts, char_counts, level=0, in_one_liner=False):
        """taking the result of _get_chains after passing through _count_chars,
        arrange things, using newlines and indentation when necessary

        :return string: prettified expression
        """
        # evaluate if it's feasible in one-line
        one_liner = in_one_liner or char_counts < self.max_len - (self.indent * level)
        new_level = level if one_liner else level + 1
        elements = [
            self._concatenates(c, n, level=new_level, in_one_liner=one_liner)
            if isinstance(c, list)
            else c
            for c, n in chain_with_counts]
        elements = self._apply_stick(elements)
        prefix = self.prefix if level and not in_one_liner else ""
        join_char = " " if one_liner else ("\n" + prefix)
        # re-split on embedded newlines so nested parts get this level's prefix
        return prefix + join_char.join(l for c in elements for l in c.split("\n"))

    def __call__(self, tree):
        """Pretty print the query represented by tree

        :param tree: a query tree using elements from :py:mod:`luqum.tree`
        """
        chains = list(self._get_chains(tree))
        chain_with_counts, total = self._count_chars(chains)
        return self._concatenates(chain_with_counts, total)
# Ready-to-use pretty printer built with the default settings.
prettify = Prettifier()
"""prettify function with default parameters
"""
| {
"content_hash": "fbc8cc7e57e8754be1dafc5074646869",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 91,
"avg_line_length": 37.95070422535211,
"alnum_prop": 0.5646687697160884,
"repo_name": "adsabs/object_service",
"id": "ceb76ec539f1688c205110251b6ad4d2d97fba27",
"size": "5413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "object_service/luqum/pretty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189892"
}
],
"symlink_target": ""
} |
import datetime
import os
import signal
import urllib
from tempfile import NamedTemporaryFile
from typing import List, Optional, Union, cast
from unittest import mock
from unittest.mock import call, mock_open, patch
import pendulum
import pytest
from freezegun import freeze_time
from airflow import models, settings
from airflow.example_dags.plugins.workday import AfterWorkdayTimetable
from airflow.exceptions import (
AirflowException,
AirflowFailException,
AirflowRescheduleException,
AirflowSensorTimeout,
AirflowSkipException,
UnmappableXComPushed,
)
from airflow.models import (
DAG,
Connection,
DagRun,
Pool,
RenderedTaskInstanceFields,
TaskInstance as TI,
TaskReschedule,
Variable,
XCom,
)
from airflow.models.taskinstance import load_error_file, set_error_file
from airflow.models.taskmap import TaskMap
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.sensors.base import BaseSensorOperator
from airflow.sensors.python import PythonSensor
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.stats import Stats
from airflow.ti_deps.dependencies_deps import REQUEUEABLE_DEPS, RUNNING_DEPS
from airflow.ti_deps.dependencies_states import RUNNABLE_STATES
from airflow.ti_deps.deps.base_ti_dep import TIDepStatus
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.db import merge_conn
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State, TaskInstanceState
from airflow.utils.types import DagRunType
from airflow.version import version
from tests.models import DEFAULT_DATE, TEST_DAGS_FOLDER
from tests.test_utils import db
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_connections, clear_db_runs
@pytest.fixture
def test_pool():
    """Provide a one-slot Pool named 'test_pool', rolling back on teardown."""
    with create_session() as session:
        pool = Pool(pool='test_pool', slots=1)
        session.add(pool)
        session.flush()
        yield pool
        session.rollback()
class CallbackWrapper:
    """Capture, from inside a task callback, the TI's state as stored in the DB."""

    task_id: Optional[str] = None
    dag_id: Optional[str] = None
    execution_date: Optional[datetime.datetime] = None
    task_state_in_callback: Optional[str] = None
    callback_ran = False

    def wrap_task_instance(self, ti):
        """Remember the identifying fields of ``ti`` and reset recorded results."""
        self.task_id, self.dag_id = ti.task_id, ti.dag_id
        self.execution_date = ti.execution_date
        self.task_state_in_callback = ""
        self.callback_ran = False

    def success_handler(self, context):
        """on_success callback: look the TI up in the DB and record its state."""
        self.callback_ran = True
        session = settings.Session()
        stored_ti = (
            session.query(TI)
            .filter(
                TI.task_id == self.task_id,
                TI.dag_id == self.dag_id,
                TI.execution_date == self.execution_date,
            )
            .one()
        )
        self.task_state_in_callback = stored_ti.state
class TestTaskInstance:
@staticmethod
def clean_db():
db.clear_db_dags()
db.clear_db_pools()
db.clear_db_runs()
db.clear_db_task_fail()
db.clear_rendered_ti_fields()
db.clear_db_task_reschedule()
def setup_method(self):
self.clean_db()
# We don't want to store any code for (test) dags created in this file
with patch.object(settings, "STORE_DAG_CODE", False):
yield
def teardown_method(self):
self.clean_db()
    def test_load_error_file_returns_None_for_closed_file(self):
        """load_error_file must return None when handed an already-closed file object."""
        error_fd = NamedTemporaryFile()
        error_fd.close()
        assert load_error_file(error_fd) is None
    def test_load_error_file_loads_correctly(self):
        """set_error_file / load_error_file must round-trip the error message."""
        error_message = "some random error message"
        with NamedTemporaryFile() as error_fd:
            set_error_file(error_fd.name, error=error_message)
            assert load_error_file(error_fd) == error_message
    def test_set_task_dates(self, dag_maker):
        """
        Test that tasks properly take start/end dates from DAGs
        """
        with dag_maker('dag', end_date=DEFAULT_DATE + datetime.timedelta(days=10)) as dag:
            pass
        op1 = DummyOperator(task_id='op_1')
        # A fresh operator carries no dates of its own.
        assert op1.start_date is None and op1.end_date is None
        # dag should assign its dates to op1 because op1 has no dates
        dag.add_task(op1)
        dag_maker.create_dagrun()
        assert op1.start_date == dag.start_date and op1.end_date == dag.end_date
        op2 = DummyOperator(
            task_id='op_2',
            start_date=DEFAULT_DATE - datetime.timedelta(days=1),
            end_date=DEFAULT_DATE + datetime.timedelta(days=11),
        )
        # dag should assign its dates to op2 because they are more restrictive
        dag.add_task(op2)
        assert op2.start_date == dag.start_date and op2.end_date == dag.end_date
        op3 = DummyOperator(
            task_id='op_3',
            start_date=DEFAULT_DATE + datetime.timedelta(days=1),
            end_date=DEFAULT_DATE + datetime.timedelta(days=9),
        )
        # op3 should keep its dates because they are more restrictive
        dag.add_task(op3)
        assert op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1)
        assert op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9)
    def test_set_dag(self, dag_maker):
        """
        Test assigning Operators to Dags, including deferred assignment
        """
        with dag_maker('dag') as dag:
            pass
        with dag_maker('dag2') as dag2:
            pass
        op = DummyOperator(task_id='op_1')
        # no dag assigned
        assert not op.has_dag()
        # reading .dag before any assignment raises
        with pytest.raises(AirflowException):
            getattr(op, 'dag')
        # no improper assignment
        with pytest.raises(TypeError):
            op.dag = 1
        op.dag = dag
        # no reassignment
        with pytest.raises(AirflowException):
            op.dag = dag2
        # but assigning the same dag is ok
        op.dag = dag
        assert op.dag is dag
        assert op in dag.tasks
    def test_infer_dag(self, create_dummy_dag):
        """Setting relationships infers the DAG across operators, and rejects cross-DAG links."""
        op1 = DummyOperator(task_id='test_op_1')
        op2 = DummyOperator(task_id='test_op_2')
        dag, op3 = create_dummy_dag(task_id='test_op_3')
        _, op4 = create_dummy_dag('dag2', task_id='test_op_4')
        # double check dags
        assert [i.has_dag() for i in [op1, op2, op3, op4]] == [False, False, True, True]
        # can't combine operators with no dags
        with pytest.raises(AirflowException):
            op1.set_downstream(op2)
        # op2 should infer dag from op1
        op1.dag = dag
        op1.set_downstream(op2)
        assert op2.dag is dag
        # can't assign across multiple DAGs
        with pytest.raises(AirflowException):
            op1.set_downstream(op4)
        with pytest.raises(AirflowException):
            op1.set_downstream([op3, op4])
    def test_bitshift_compose_operators(self, dag_maker):
        """``op1 >> op2 << op3`` must wire op2 downstream of both op1 and op3."""
        with dag_maker('dag'):
            op1 = DummyOperator(task_id='test_op_1')
            op2 = DummyOperator(task_id='test_op_2')
            op3 = DummyOperator(task_id='test_op_3')
            op1 >> op2 << op3
        dag_maker.create_dagrun()
        # op2 should be downstream of both
        assert op2 in op1.downstream_list
        assert op2 in op3.downstream_list
    @patch.object(DAG, 'get_concurrency_reached')
    def test_requeue_over_dag_concurrency(self, mock_concurrency_reached, create_task_instance):
        """A TI whose DAG reports concurrency reached must not run; it falls back to NONE."""
        mock_concurrency_reached.return_value = True
        ti = create_task_instance(
            dag_id='test_requeue_over_dag_concurrency',
            task_id='test_requeue_over_dag_concurrency_op',
            max_active_runs=1,
            max_active_tasks=2,
            dagrun_state=State.QUEUED,
        )
        ti.run()
        assert ti.state == State.NONE
    def test_requeue_over_max_active_tis_per_dag(self, create_task_instance):
        """With max_active_tis_per_dag=0 the TI can never run; it falls back to NONE."""
        ti = create_task_instance(
            dag_id='test_requeue_over_max_active_tis_per_dag',
            task_id='test_requeue_over_max_active_tis_per_dag_op',
            max_active_tis_per_dag=0,
            max_active_runs=1,
            max_active_tasks=2,
            dagrun_state=State.QUEUED,
        )
        ti.run()
        assert ti.state == State.NONE
    def test_requeue_over_pool_concurrency(self, create_task_instance, test_pool):
        """A TI whose pool has zero open slots must not run; it falls back to NONE."""
        ti = create_task_instance(
            dag_id='test_requeue_over_pool_concurrency',
            task_id='test_requeue_over_pool_concurrency_op',
            max_active_tis_per_dag=0,
            max_active_runs=1,
            max_active_tasks=2,
        )
        with create_session() as session:
            # Shrink the fixture pool to zero slots before running.
            test_pool.slots = 0
            session.flush()
            ti.run()
            assert ti.state == State.NONE
    @pytest.mark.usefixtures('test_pool')
    def test_not_requeue_non_requeueable_task_instance(self, dag_maker):
        """A QUEUED TI whose failing dep is not requeueable must stay QUEUED."""
        # Use BaseSensorOperator because sensor got
        # one additional DEP in BaseSensorOperator().deps
        with dag_maker(dag_id='test_not_requeue_non_requeueable_task_instance'):
            task = BaseSensorOperator(
                task_id='test_not_requeue_non_requeueable_task_instance_op',
                pool='test_pool',
            )
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        ti.state = State.QUEUED
        with create_session() as session:
            session.add(ti)
            session.commit()
        all_deps = RUNNING_DEPS | task.deps
        all_non_requeueable_deps = all_deps - REQUEUEABLE_DEPS
        patch_dict = {}
        # Patch every non-requeueable dep so it passes by default.
        for dep in all_non_requeueable_deps:
            class_name = dep.__class__.__name__
            dep_patch = patch(f'{dep.__module__}.{class_name}.{dep._get_dep_statuses.__name__}')
            method_patch = dep_patch.start()
            method_patch.return_value = iter([TIDepStatus('mock_' + class_name, True, 'mock')])
            patch_dict[class_name] = (dep_patch, method_patch)
        # Fail each dep in turn; the TI must be left QUEUED every time.
        for class_name, (dep_patch, method_patch) in patch_dict.items():
            method_patch.return_value = iter([TIDepStatus('mock_' + class_name, False, 'mock')])
            ti.run()
            assert ti.state == State.QUEUED
            # NOTE(review): this sets return_value on the *patcher* object, not on the
            # mocked method (method_patch), so it appears to have no effect — confirm
            # whether `method_patch.return_value = ...` (passing again) was intended.
            dep_patch.return_value = TIDepStatus('mock_' + class_name, True, 'mock')
        # Undo all the started patchers.
        for (dep_patch, method_patch) in patch_dict.values():
            dep_patch.stop()
def test_mark_non_runnable_task_as_success(self, create_task_instance):
"""
test that running task with mark_success param update task state
as SUCCESS without running task despite it fails dependency checks.
"""
non_runnable_state = (set(State.task_states) - RUNNABLE_STATES - set(State.SUCCESS)).pop()
ti = create_task_instance(
dag_id='test_mark_non_runnable_task_as_success',
task_id='test_mark_non_runnable_task_as_success_op',
state=non_runnable_state,
)
ti.run(mark_success=True)
assert ti.state == State.SUCCESS
    @pytest.mark.usefixtures('test_pool')
    def test_run_pooling_task(self, create_task_instance):
        """
        test that running a task in an existing pool update task state as SUCCESS.
        """
        # 'test_pool' exists courtesy of the test_pool fixture (usefixtures above).
        ti = create_task_instance(
            dag_id='test_run_pooling_task',
            task_id='test_run_pooling_task_op',
            pool='test_pool',
        )
        ti.run()
        assert ti.state == State.SUCCESS
    @pytest.mark.usefixtures('test_pool')
    def test_pool_slots_property(self):
        """
        test that try to create a task with pool_slots less than 1
        """
        # The ValueError is expected at operator construction time.
        with pytest.raises(ValueError, match="pool slots .* cannot be less than 1"):
            dag = models.DAG(dag_id='test_run_pooling_task')
            DummyOperator(
                task_id='test_run_pooling_task_op',
                dag=dag,
                pool='test_pool',
                pool_slots=0,
            )
    @provide_session
    def test_ti_updates_with_task(self, create_task_instance, session=None):
        """
        test that updating the executor_config propagates to the TaskInstance DB
        """
        ti = create_task_instance(
            dag_id='test_run_pooling_task',
            task_id='test_run_pooling_task_op',
            executor_config={'foo': 'bar'},
        )
        dag = ti.task.dag
        ti.run(session=session)
        tis = dag.get_task_instances()
        assert {'foo': 'bar'} == tis[0].executor_config
        # A second task in the same DAG/run with a different executor_config.
        task2 = DummyOperator(
            task_id='test_run_pooling_task_op2',
            executor_config={'bar': 'baz'},
            start_date=timezone.datetime(2016, 2, 1, 0, 0, 0),
            dag=dag,
        )
        ti2 = TI(task=task2, run_id=ti.run_id)
        session.add(ti2)
        session.flush()
        ti2.run(session=session)
        # Ensure it's reloaded
        ti2.executor_config = None
        ti2.refresh_from_db(session)
        assert {'bar': 'baz'} == ti2.executor_config
        session.rollback()
    def test_run_pooling_task_with_mark_success(self, create_task_instance):
        """
        test that running task in an existing pool with mark_success param
        update task state as SUCCESS without running task
        despite it fails dependency checks.
        """
        ti = create_task_instance(
            dag_id='test_run_pooling_task_with_mark_success',
            task_id='test_run_pooling_task_with_mark_success_op',
        )
        # mark_success skips actual execution but still flips state to SUCCESS.
        ti.run(mark_success=True)
        assert ti.state == State.SUCCESS
    def test_run_pooling_task_with_skip(self, dag_maker):
        """
        test that running a task which raises AirflowSkipException will end
        up in a SKIPPED state.
        """
        def raise_skip_exception():
            raise AirflowSkipException
        with dag_maker(dag_id='test_run_pooling_task_with_skip'):
            task = PythonOperator(
                task_id='test_run_pooling_task_with_skip',
                python_callable=raise_skip_exception,
            )
        dr = dag_maker.create_dagrun(execution_date=timezone.utcnow())
        ti = dr.task_instances[0]
        ti.task = task
        ti.run()
        assert State.SKIPPED == ti.state
    def test_task_sigterm_works_with_retries(self, dag_maker):
        """
        Test that ensures that tasks are retried when they receive sigterm
        """
        def task_function(ti):
            # The task sends SIGTERM to its own process.
            os.kill(ti.pid, signal.SIGTERM)
        with dag_maker('test_mark_failure_2'):
            task = PythonOperator(
                task_id='test_on_failure',
                python_callable=task_function,
                retries=1,
                retry_delay=datetime.timedelta(seconds=2),
            )
        dr = dag_maker.create_dagrun()
        ti = dr.task_instances[0]
        ti.task = task
        with pytest.raises(AirflowException):
            ti.run()
        ti.refresh_from_db()
        # With retries configured, SIGTERM lands the TI in UP_FOR_RETRY, not FAILED.
        assert ti.state == State.UP_FOR_RETRY
@pytest.mark.parametrize("state", [State.SUCCESS, State.FAILED, State.SKIPPED])
def test_task_sigterm_doesnt_change_state_of_finished_tasks(self, state, dag_maker):
session = settings.Session()
def task_function(ti):
ti.state = state
session.merge(ti)
session.commit()
raise AirflowException()
with dag_maker('test_mark_failure_2'):
task = PythonOperator(
task_id='test_on_failure',
python_callable=task_function,
)
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.task = task
ti.run()
ti.refresh_from_db()
ti.state == state
    @pytest.mark.parametrize(
        "state, exception, retries",
        [
            (State.FAILED, AirflowException, 0),
            (State.SKIPPED, AirflowSkipException, 0),
            (State.SUCCESS, None, 0),
            (State.UP_FOR_RESCHEDULE, AirflowRescheduleException(timezone.utcnow()), 0),
            (State.UP_FOR_RETRY, AirflowException, 1),
        ],
    )
    def test_task_wipes_next_fields(self, session, dag_maker, state, exception, retries):
        """
        Test that ensures that tasks wipe their next_method and next_kwargs
        when the TI enters one of the configured states.
        """
        def _raise_if_exception():
            if exception:
                raise exception
        with dag_maker("test_deferred_method_clear"):
            task = PythonOperator(
                task_id="test_deferred_method_clear_task",
                python_callable=_raise_if_exception,
                retries=retries,
                retry_delay=datetime.timedelta(seconds=2),
            )
        dr = dag_maker.create_dagrun()
        ti = dr.task_instances[0]
        # Pre-populate the deferral fields so we can observe them being wiped.
        ti.next_method = "execute"
        ti.next_kwargs = {}
        session.merge(ti)
        session.commit()
        ti.task = task
        # FAILED / UP_FOR_RETRY propagate the exception out of run().
        if state in [State.FAILED, State.UP_FOR_RETRY]:
            with pytest.raises(exception):
                ti.run()
        else:
            ti.run()
        ti.refresh_from_db()
        assert ti.next_method is None
        assert ti.next_kwargs is None
        assert ti.state == state
    @freeze_time('2021-09-19 04:56:35', as_kwarg='frozen_time')
    def test_retry_delay(self, dag_maker, frozen_time=None):
        """
        Test that retry delays are respected
        """
        with dag_maker(dag_id='test_retry_handling'):
            task = BashOperator(
                task_id='test_retry_handling_op',
                bash_command='exit 1',
                retries=1,
                retry_delay=datetime.timedelta(seconds=3),
            )
        def run_with_error(ti):
            # Swallow the expected failure so we can inspect the resulting state.
            try:
                ti.run()
            except AirflowException:
                pass
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        assert ti.try_number == 1
        # first run -- up for retry
        run_with_error(ti)
        assert ti.state == State.UP_FOR_RETRY
        assert ti.try_number == 2
        # second run -- still up for retry because retry_delay hasn't expired
        frozen_time.tick(delta=datetime.timedelta(seconds=3))
        run_with_error(ti)
        assert ti.state == State.UP_FOR_RETRY
        # third run -- failed
        frozen_time.tick(delta=datetime.datetime.resolution)
        run_with_error(ti)
        assert ti.state == State.FAILED
    def test_retry_handling(self, dag_maker):
        """
        Test that task retries are handled properly
        """
        # The bash_command below templates {{dag.dag_id}} into this rendered value.
        expected_rendered_ti_fields = {'env': None, 'bash_command': 'echo test_retry_handling; exit 1'}
        with dag_maker(dag_id='test_retry_handling') as dag:
            task = BashOperator(
                task_id='test_retry_handling_op',
                bash_command='echo {{dag.dag_id}}; exit 1',
                retries=1,
                retry_delay=datetime.timedelta(seconds=0),
            )
        def run_with_error(ti):
            # Swallow the expected failure so we can inspect the resulting state.
            try:
                ti.run()
            except AirflowException:
                pass
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        assert ti.try_number == 1
        # first run -- up for retry
        run_with_error(ti)
        assert ti.state == State.UP_FOR_RETRY
        assert ti._try_number == 1
        assert ti.try_number == 2
        # second run -- fail
        run_with_error(ti)
        assert ti.state == State.FAILED
        assert ti._try_number == 2
        assert ti.try_number == 3
        # Clear the TI state since you can't run a task with a FAILED state without
        # clearing it first
        dag.clear()
        # third run -- up for retry
        run_with_error(ti)
        assert ti.state == State.UP_FOR_RETRY
        assert ti._try_number == 3
        assert ti.try_number == 4
        # fourth run -- fail
        run_with_error(ti)
        ti.refresh_from_db()
        assert ti.state == State.FAILED
        assert ti._try_number == 4
        assert ti.try_number == 5
        assert RenderedTaskInstanceFields.get_templated_fields(ti) == expected_rendered_ti_fields
    def test_next_retry_datetime(self, dag_maker):
        """Exponential backoff grows with try_number and is capped at max_retry_delay."""
        delay = datetime.timedelta(seconds=30)
        max_delay = datetime.timedelta(minutes=60)
        with dag_maker(dag_id='fail_dag'):
            task = BashOperator(
                task_id='task_with_exp_backoff_and_max_delay',
                bash_command='exit 1',
                retries=3,
                retry_delay=delay,
                retry_exponential_backoff=True,
                max_retry_delay=max_delay,
            )
        ti = dag_maker.create_dagrun().task_instances[0]
        ti.task = task
        ti.end_date = pendulum.instance(timezone.utcnow())
        date = ti.next_retry_datetime()
        # between 30 * 2^0.5 and 30 * 2^1 (15 and 30)
        period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
        assert date in period
        ti.try_number = 3
        date = ti.next_retry_datetime()
        # between 30 * 2^2 and 30 * 2^3 (120 and 240)
        period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
        assert date in period
        ti.try_number = 5
        date = ti.next_retry_datetime()
        # between 30 * 2^4 and 30 * 2^5 (480 and 960)
        period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
        assert date in period
        # Past the cap, the delay is exactly max_retry_delay.
        ti.try_number = 9
        date = ti.next_retry_datetime()
        assert date == ti.end_date + max_delay
        ti.try_number = 50
        date = ti.next_retry_datetime()
        assert date == ti.end_date + max_delay
    @pytest.mark.parametrize("seconds", [0, 0.5, 1])
    def test_next_retry_datetime_short_or_zero_intervals(self, dag_maker, seconds):
        """With sub-second or zero retry_delay, backoff still yields at least one second."""
        delay = datetime.timedelta(seconds=seconds)
        max_delay = datetime.timedelta(minutes=60)
        with dag_maker(dag_id='fail_dag'):
            task = BashOperator(
                task_id='task_with_exp_backoff_and_short_or_zero_time_interval',
                bash_command='exit 1',
                retries=3,
                retry_delay=delay,
                retry_exponential_backoff=True,
                max_retry_delay=max_delay,
            )
        ti = dag_maker.create_dagrun().task_instances[0]
        ti.task = task
        ti.end_date = pendulum.instance(timezone.utcnow())
        date = ti.next_retry_datetime()
        assert date == ti.end_date + datetime.timedelta(seconds=1)
    def test_reschedule_handling(self, dag_maker):
        """
        Test that task reschedules are handled properly
        """
        # Return values of the python sensor callable, modified during tests
        done = False
        fail = False
        def func():
            if fail:
                raise AirflowException()
            return done
        with dag_maker(dag_id='test_reschedule_handling') as dag:
            task = PythonSensor(
                task_id='test_reschedule_handling_sensor',
                poke_interval=0,
                mode='reschedule',
                python_callable=func,
                retries=1,
                retry_delay=datetime.timedelta(seconds=0),
            )
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        assert ti._try_number == 0
        assert ti.try_number == 1
        # Helper: run the TI at a frozen time and assert the full bookkeeping
        # (state, try numbers, dates, duration, recorded reschedules).
        def run_ti_and_assert(
            run_date,
            expected_start_date,
            expected_end_date,
            expected_duration,
            expected_state,
            expected_try_number,
            expected_task_reschedule_count,
        ):
            with freeze_time(run_date):
                try:
                    ti.run()
                except AirflowException:
                    if not fail:
                        raise
            ti.refresh_from_db()
            assert ti.state == expected_state
            assert ti._try_number == expected_try_number
            assert ti.try_number == expected_try_number + 1
            assert ti.start_date == expected_start_date
            assert ti.end_date == expected_end_date
            assert ti.duration == expected_duration
            trs = TaskReschedule.find_for_task_instance(ti)
            assert len(trs) == expected_task_reschedule_count
        date1 = timezone.utcnow()
        date2 = date1 + datetime.timedelta(minutes=1)
        date3 = date2 + datetime.timedelta(minutes=1)
        date4 = date3 + datetime.timedelta(minutes=1)
        # Run with multiple reschedules.
        # During reschedule the try number remains the same, but each reschedule is recorded.
        # The start date is expected to remain the initial date, hence the duration increases.
        # When finished the try number is incremented and there is no reschedule expected
        # for this try.
        done, fail = False, False
        run_ti_and_assert(date1, date1, date1, 0, State.UP_FOR_RESCHEDULE, 0, 1)
        done, fail = False, False
        run_ti_and_assert(date2, date1, date2, 60, State.UP_FOR_RESCHEDULE, 0, 2)
        done, fail = False, False
        run_ti_and_assert(date3, date1, date3, 120, State.UP_FOR_RESCHEDULE, 0, 3)
        done, fail = True, False
        run_ti_and_assert(date4, date1, date4, 180, State.SUCCESS, 1, 0)
        # Clear the task instance.
        dag.clear()
        ti.refresh_from_db()
        assert ti.state == State.NONE
        assert ti._try_number == 1
        # Run again after clearing with reschedules and a retry.
        # The retry increments the try number, and for that try no reschedule is expected.
        # After the retry the start date is reset, hence the duration is also reset.
        done, fail = False, False
        run_ti_and_assert(date1, date1, date1, 0, State.UP_FOR_RESCHEDULE, 1, 1)
        done, fail = False, True
        run_ti_and_assert(date2, date1, date2, 60, State.UP_FOR_RETRY, 2, 0)
        done, fail = False, False
        run_ti_and_assert(date3, date3, date3, 0, State.UP_FOR_RESCHEDULE, 2, 1)
        done, fail = True, False
        run_ti_and_assert(date4, date3, date4, 60, State.SUCCESS, 3, 0)
    @pytest.mark.usefixtures('test_pool')
    def test_reschedule_handling_clear_reschedules(self, dag_maker):
        """
        Test that task reschedules clearing are handled properly
        """
        # Return values of the python sensor callable, modified during tests
        done = False
        fail = False
        def func():
            if fail:
                raise AirflowException()
            return done
        with dag_maker(dag_id='test_reschedule_handling') as dag:
            task = PythonSensor(
                task_id='test_reschedule_handling_sensor',
                poke_interval=0,
                mode='reschedule',
                python_callable=func,
                retries=1,
                retry_delay=datetime.timedelta(seconds=0),
                pool='test_pool',
            )
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        assert ti._try_number == 0
        assert ti.try_number == 1
        # Helper: run the TI at a frozen time and assert the full bookkeeping
        # (state, try numbers, dates, duration, recorded reschedules).
        def run_ti_and_assert(
            run_date,
            expected_start_date,
            expected_end_date,
            expected_duration,
            expected_state,
            expected_try_number,
            expected_task_reschedule_count,
        ):
            with freeze_time(run_date):
                try:
                    ti.run()
                except AirflowException:
                    if not fail:
                        raise
            ti.refresh_from_db()
            assert ti.state == expected_state
            assert ti._try_number == expected_try_number
            assert ti.try_number == expected_try_number + 1
            assert ti.start_date == expected_start_date
            assert ti.end_date == expected_end_date
            assert ti.duration == expected_duration
            trs = TaskReschedule.find_for_task_instance(ti)
            assert len(trs) == expected_task_reschedule_count
        date1 = timezone.utcnow()
        done, fail = False, False
        run_ti_and_assert(date1, date1, date1, 0, State.UP_FOR_RESCHEDULE, 0, 1)
        # Clear the task instance.
        dag.clear()
        ti.refresh_from_db()
        assert ti.state == State.NONE
        assert ti._try_number == 0
        # Check that reschedules for ti have also been cleared.
        trs = TaskReschedule.find_for_task_instance(ti)
        assert not trs
    def test_depends_on_past(self, dag_maker):
        """With a FAILED prior run, depends_on_past blocks the TI unless the first
        depends_on_past is explicitly ignored."""
        with dag_maker(dag_id='test_depends_on_past'):
            task = DummyOperator(
                task_id='test_dop_task',
                depends_on_past=True,
            )
        dag_maker.create_dagrun(
            state=State.FAILED,
            run_type=DagRunType.SCHEDULED,
        )
        run_date = task.start_date + datetime.timedelta(days=5)
        dr = dag_maker.create_dagrun(
            execution_date=run_date,
            run_type=DagRunType.SCHEDULED,
        )
        ti = dr.task_instances[0]
        ti.task = task
        # depends_on_past prevents the run
        task.run(start_date=run_date, end_date=run_date, ignore_first_depends_on_past=False)
        ti.refresh_from_db()
        assert ti.state is None
        # ignore first depends_on_past to allow the run
        task.run(start_date=run_date, end_date=run_date, ignore_first_depends_on_past=True)
        ti.refresh_from_db()
        assert ti.state == State.SUCCESS
    # Parameterized tests to check for the correct firing
    # of the trigger_rule under various circumstances
    # Numeric fields are in order:
    # successes, skipped, failed, upstream_failed, done
    @pytest.mark.parametrize(
        "trigger_rule,successes,skipped,failed,upstream_failed,done,"
        "flag_upstream_failed,expect_state,expect_completed",
        [
            #
            # Tests for all_success
            #
            ['all_success', 5, 0, 0, 0, 0, True, None, True],
            ['all_success', 2, 0, 0, 0, 0, True, None, False],
            ['all_success', 2, 0, 1, 0, 0, True, State.UPSTREAM_FAILED, False],
            ['all_success', 2, 1, 0, 0, 0, True, State.SKIPPED, False],
            #
            # Tests for one_success
            #
            ['one_success', 5, 0, 0, 0, 5, True, None, True],
            ['one_success', 2, 0, 0, 0, 2, True, None, True],
            ['one_success', 2, 0, 1, 0, 3, True, None, True],
            ['one_success', 2, 1, 0, 0, 3, True, None, True],
            ['one_success', 0, 5, 0, 0, 5, True, State.SKIPPED, False],
            ['one_success', 0, 4, 1, 0, 5, True, State.UPSTREAM_FAILED, False],
            ['one_success', 0, 3, 1, 1, 5, True, State.UPSTREAM_FAILED, False],
            ['one_success', 0, 4, 0, 1, 5, True, State.UPSTREAM_FAILED, False],
            ['one_success', 0, 0, 5, 0, 5, True, State.UPSTREAM_FAILED, False],
            ['one_success', 0, 0, 4, 1, 5, True, State.UPSTREAM_FAILED, False],
            ['one_success', 0, 0, 0, 5, 5, True, State.UPSTREAM_FAILED, False],
            #
            # Tests for all_failed
            #
            ['all_failed', 5, 0, 0, 0, 5, True, State.SKIPPED, False],
            ['all_failed', 0, 0, 5, 0, 5, True, None, True],
            ['all_failed', 2, 0, 0, 0, 2, True, State.SKIPPED, False],
            ['all_failed', 2, 0, 1, 0, 3, True, State.SKIPPED, False],
            ['all_failed', 2, 1, 0, 0, 3, True, State.SKIPPED, False],
            #
            # Tests for one_failed
            #
            ['one_failed', 5, 0, 0, 0, 0, True, None, False],
            ['one_failed', 2, 0, 0, 0, 0, True, None, False],
            ['one_failed', 2, 0, 1, 0, 0, True, None, True],
            ['one_failed', 2, 1, 0, 0, 3, True, None, False],
            ['one_failed', 2, 3, 0, 0, 5, True, State.SKIPPED, False],
            #
            # Tests for done
            #
            ['all_done', 5, 0, 0, 0, 5, True, None, True],
            ['all_done', 2, 0, 0, 0, 2, True, None, False],
            ['all_done', 2, 0, 1, 0, 3, True, None, False],
            ['all_done', 2, 1, 0, 0, 3, True, None, False],
        ],
    )
    def test_check_task_dependencies(
        self,
        trigger_rule: str,
        successes: int,
        skipped: int,
        failed: int,
        upstream_failed: int,
        done: int,
        flag_upstream_failed: bool,
        expect_state: State,
        expect_completed: bool,
        dag_maker,
    ):
        """Evaluate TriggerRuleDep against synthetic upstream counts; check both
        whether every dep passed and the state the TI was flagged into."""
        with dag_maker() as dag:
            downstream = DummyOperator(task_id="downstream", trigger_rule=trigger_rule)
            for i in range(5):
                task = DummyOperator(task_id=f'runme_{i}', dag=dag)
                task.set_downstream(downstream)
            assert task.start_date is not None
        run_date = task.start_date + datetime.timedelta(days=5)
        ti = dag_maker.create_dagrun(execution_date=run_date).get_task_instance(downstream.task_id)
        ti.task = downstream
        dep_results = TriggerRuleDep()._evaluate_trigger_rule(
            ti=ti,
            successes=successes,
            skipped=skipped,
            failed=failed,
            upstream_failed=upstream_failed,
            done=done,
            flag_upstream_failed=flag_upstream_failed,
        )
        completed = all(dep.passed for dep in dep_results)
        assert completed == expect_completed
        assert ti.state == expect_state
    def test_respects_prev_dagrun_dep(self, create_task_instance):
        """are_dependencies_met must follow PrevDagrunDep's pass/fail verdict."""
        ti = create_task_instance()
        failing_status = [TIDepStatus('test fail status name', False, 'test fail reason')]
        passing_status = [TIDepStatus('test pass status name', True, 'test passing reason')]
        with patch(
            'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep.get_dep_statuses', return_value=failing_status
        ):
            assert not ti.are_dependencies_met()
        with patch(
            'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep.get_dep_statuses', return_value=passing_status
        ):
            assert ti.are_dependencies_met()
    @pytest.mark.parametrize(
        "downstream_ti_state, expected_are_dependents_done",
        [
            (State.SUCCESS, True),
            (State.SKIPPED, True),
            (State.RUNNING, False),
            (State.FAILED, False),
            (State.NONE, False),
        ],
    )
    @provide_session
    def test_are_dependents_done(
        self, downstream_ti_state, expected_are_dependents_done, create_task_instance, session=None
    ):
        """are_dependents_done is True only when the downstream TI is SUCCESS or SKIPPED."""
        ti = create_task_instance(session=session)
        dag = ti.task.dag
        downstream_task = DummyOperator(task_id='downstream_task', dag=dag)
        ti.task >> downstream_task
        downstream_ti = TI(downstream_task, run_id=ti.run_id)
        downstream_ti.set_state(downstream_ti_state, session)
        session.flush()
        assert ti.are_dependents_done(session) == expected_are_dependents_done
def test_xcom_pull(self, create_task_instance):
"""
Test xcom_pull, using different filtering methods.
"""
ti1 = create_task_instance(
dag_id='test_xcom',
task_id='test_xcom_1',
start_date=timezone.datetime(2016, 6, 1, 0, 0, 0),
)
# Push a value
ti1.xcom_push(key='foo', value='bar')
# Push another value with the same key (but by a different task)
XCom.set(
key='foo',
value='baz',
task_id='test_xcom_2',
dag_id=ti1.dag_id,
execution_date=ti1.execution_date,
)
# Pull with no arguments
result = ti1.xcom_pull()
assert result is None
# Pull the value pushed most recently by any task.
result = ti1.xcom_pull(key='foo')
assert result in 'baz'
# Pull the value pushed by the first task
result = ti1.xcom_pull(task_ids='test_xcom_1', key='foo')
assert result == 'bar'
# Pull the value pushed by the second task
result = ti1.xcom_pull(task_ids='test_xcom_2', key='foo')
assert result == 'baz'
# Pull the values pushed by both tasks & Verify Order of task_ids pass & values returned
result = ti1.xcom_pull(task_ids=['test_xcom_1', 'test_xcom_2'], key='foo')
assert result == ['bar', 'baz']
    def test_xcom_pull_after_success(self, create_task_instance):
        """
        tests xcom set/clear relative to a task in a 'success' rerun scenario
        """
        key = 'xcom_key'
        value = 'xcom_value'
        ti = create_task_instance(
            dag_id='test_xcom',
            schedule_interval='@monthly',
            task_id='test_xcom',
            pool='test_xcom',
        )
        # Push after a mark_success run; the value must survive reruns that
        # do not actually execute the task.
        ti.run(mark_success=True)
        ti.xcom_push(key=key, value=value)
        assert ti.xcom_pull(task_ids='test_xcom', key=key) == value
        ti.run()
        # The second run and assert is to handle AIRFLOW-131 (don't clear on
        # prior success)
        assert ti.xcom_pull(task_ids='test_xcom', key=key) == value
        # Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
        # execute, even if dependencies are ignored
        ti.run(ignore_all_deps=True, mark_success=True)
        assert ti.xcom_pull(task_ids='test_xcom', key=key) == value
        # Xcom IS finally cleared once task has executed
        ti.run(ignore_all_deps=True)
        assert ti.xcom_pull(task_ids='test_xcom', key=key) is None
    def test_xcom_pull_different_execution_date(self, create_task_instance):
        """
        tests xcom fetch behavior with different execution dates, using
        both xcom_pull with "include_prior_dates" and without
        """
        key = 'xcom_key'
        value = 'xcom_value'
        ti = create_task_instance(
            dag_id='test_xcom',
            schedule_interval='@monthly',
            task_id='test_xcom',
            pool='test_xcom',
        )
        exec_date = ti.dag_run.execution_date
        ti.run(mark_success=True)
        ti.xcom_push(key=key, value=value)
        assert ti.xcom_pull(task_ids='test_xcom', key=key) == value
        ti.run()
        # Build a second dag run one day later and a TI for it.
        exec_date += datetime.timedelta(days=1)
        dr = ti.task.dag.create_dagrun(run_id="test2", execution_date=exec_date, state=None)
        ti = TI(task=ti.task, run_id=dr.run_id)
        ti.run()
        # We have set a new execution date (and did not pass in
        # 'include_prior_dates' which means this task should now have a cleared
        # xcom value
        assert ti.xcom_pull(task_ids='test_xcom', key=key) is None
        # We *should* get a value using 'include_prior_dates'
        assert ti.xcom_pull(task_ids='test_xcom', key=key, include_prior_dates=True) == value
    def test_xcom_push_flag(self, dag_maker):
        """
        Tests the option for Operators to push XComs
        """
        value = 'hello'
        task_id = 'test_no_xcom_push'
        with dag_maker(dag_id='test_xcom'):
            # nothing saved to XCom
            task = PythonOperator(
                task_id=task_id,
                python_callable=lambda: value,
                do_xcom_push=False,
            )
        ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
        ti.task = task
        ti.run()
        # With do_xcom_push=False the callable's return value must not be stored.
        assert ti.xcom_pull(task_ids=task_id, key=models.XCOM_RETURN_KEY) is None
    def test_post_execute_hook(self, dag_maker):
        """
        Test that post_execute hook is called with the Operator's result.
        The result ('error') will cause an error to be raised and trapped.
        """
        class TestError(Exception):
            pass
        class TestOperator(PythonOperator):
            # post_execute receives the callable's return value as ``result``.
            def post_execute(self, context, result=None):
                if result == 'error':
                    raise TestError('expected error.')
        with dag_maker(dag_id='test_post_execute_dag'):
            task = TestOperator(
                task_id='test_operator',
                python_callable=lambda: 'error',
            )
        ti = dag_maker.create_dagrun(execution_date=DEFAULT_DATE).task_instances[0]
        ti.task = task
        with pytest.raises(TestError):
            ti.run()
    def test_check_and_change_state_before_execution(self, create_task_instance):
        """A runnable TI is moved to RUNNING and its try counter incremented."""
        ti = create_task_instance(dag_id='test_check_and_change_state_before_execution')
        assert ti._try_number == 0
        assert ti.check_and_change_state_before_execution()
        # State should be running, and try_number column should be incremented
        assert ti.state == State.RUNNING
        assert ti._try_number == 1
    def test_check_and_change_state_before_execution_dep_not_met(self, create_task_instance):
        """A TI whose upstream has not run must not be moved to RUNNING."""
        ti = create_task_instance(dag_id='test_check_and_change_state_before_execution')
        task2 = DummyOperator(task_id='task2', dag=ti.task.dag, start_date=DEFAULT_DATE)
        ti.task >> task2
        ti = TI(task=task2, run_id=ti.run_id)
        assert not ti.check_and_change_state_before_execution()
    def test_try_number(self, create_task_instance):
        """
        Test the try_number accessor behaves in various running states
        """
        ti = create_task_instance(dag_id='test_check_and_change_state_before_execution')
        assert 1 == ti.try_number
        ti.try_number = 2
        ti.state = State.RUNNING
        # While RUNNING, try_number reflects the attempt in progress...
        assert 2 == ti.try_number
        ti.state = State.SUCCESS
        # ...once finished, it reflects the *next* attempt.
        assert 3 == ti.try_number
    def test_get_num_running_task_instances(self, create_task_instance):
        """Each TI counts RUNNING task instances within its own DAG only."""
        session = settings.Session()
        ti1 = create_task_instance(
            dag_id='test_get_num_running_task_instances', task_id='task1', session=session
        )
        # Second dag run of the same DAG, one day later.
        dr = ti1.task.dag.create_dagrun(
            execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
            state=None,
            run_id='2',
            session=session,
        )
        assert ti1 in session
        ti2 = dr.task_instances[0]
        ti2.task = ti1.task
        # A TI in a completely separate DAG.
        ti3 = create_task_instance(
            dag_id='test_get_num_running_task_instances_dummy', task_id='task2', session=session
        )
        assert ti3 in session
        assert ti1 in session
        ti1.state = State.RUNNING
        ti2.state = State.QUEUED
        ti3.state = State.RUNNING
        assert ti3 in session
        session.commit()
        # ti1/ti2 share a DAG with exactly one RUNNING TI (ti1); ti3's DAG has one (itself).
        assert 1 == ti1.get_num_running_task_instances(session=session)
        assert 1 == ti2.get_num_running_task_instances(session=session)
        assert 1 == ti3.get_num_running_task_instances(session=session)
# def test_log_url(self):
# now = pendulum.now('Europe/Brussels')
# dag = DAG('dag', start_date=DEFAULT_DATE)
# task = DummyOperator(task_id='op', dag=dag)
# ti = TI(task=task, execution_date=now)
# d = urllib.parse.parse_qs(
# urllib.parse.urlparse(ti.log_url).query,
# keep_blank_values=True, strict_parsing=True)
# self.assertEqual(d['dag_id'][0], 'dag')
# self.assertEqual(d['task_id'][0], 'op')
# self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_log_url(self, create_task_instance):
    """log_url embeds the URL-encoded execution_date plus task_id and dag_id."""
    ti = create_task_instance(dag_id='dag', task_id='op', execution_date=timezone.datetime(2018, 1, 1))

    expected_url = (
        'http://localhost:8080/log?'
        'execution_date=2018-01-01T00%3A00%3A00%2B00%3A00'
        '&task_id=op'
        '&dag_id=dag'
    )
    assert ti.log_url == expected_url
def test_mark_success_url(self, create_task_instance):
    """mark_success_url round-trips dag_id, task_id and execution_date in its query."""
    now = pendulum.now('Europe/Brussels')
    ti = create_task_instance(dag_id='dag', task_id='op', execution_date=now)
    query = urllib.parse.parse_qs(
        urllib.parse.urlparse(ti.mark_success_url).query, keep_blank_values=True, strict_parsing=True
    )
    assert query['dag_id'][0] == 'dag'
    assert query['task_id'][0] == 'op'
    # Parse rather than string-compare: encoding may normalize the timezone form.
    assert pendulum.parse(query['execution_date'][0]) == now
def test_overwrite_params_with_dag_run_conf(self, create_task_instance):
    """An entry in dag_run.conf must override the matching params entry."""
    task_instance = create_task_instance()
    run = task_instance.dag_run
    run.conf = {"override": True}
    task_params = {"override": False}

    task_instance.overwrite_params_with_dag_run_conf(task_params, run)

    assert task_params["override"] is True
def test_overwrite_params_with_dag_run_none(self, create_task_instance):
    """Passing no dag_run at all leaves params untouched."""
    ti = create_task_instance()
    params = {"override": False}

    ti.overwrite_params_with_dag_run_conf(params, None)

    assert params["override"] is False
def test_overwrite_params_with_dag_run_conf_none(self, create_task_instance):
    """A dag_run without conf leaves params untouched."""
    ti = create_task_instance()
    params = {"override": False}
    dag_run = ti.dag_run

    ti.overwrite_params_with_dag_run_conf(params, dag_run)

    assert params["override"] is False
@patch('airflow.models.taskinstance.send_email')
def test_email_alert(self, mock_send_email, dag_maker):
    """A failing task with `email` set sends an alert naming the task and try number."""
    with dag_maker(dag_id='test_failure_email'):
        task = BashOperator(task_id='test_email_alert', bash_command='exit 1', email='to')
    ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
    ti.task = task

    try:
        ti.run()
    except AirflowException:
        pass  # the bash task exits 1 on purpose; only the email matters here

    (email, title, body), _ = mock_send_email.call_args
    assert email == 'to'
    assert 'test_email_alert' in title
    assert 'test_email_alert' in body
    assert 'Try 1' in body
@conf_vars(
    {
        ('email', 'subject_template'): '/subject/path',
        ('email', 'html_content_template'): '/html_content/path',
    }
)
@patch('airflow.models.taskinstance.send_email')
def test_email_alert_with_config(self, mock_send_email, dag_maker):
    """Custom subject/content template paths from config are read and rendered."""
    with dag_maker(dag_id='test_failure_email'):
        task = BashOperator(
            task_id='test_email_alert_with_config',
            bash_command='exit 1',
            email='to',
        )
    ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
    ti.task = task

    # Fake the template files configured above; both "files" contain the same template.
    opener = mock_open(read_data='template: {{ti.task_id}}')
    with patch('airflow.models.taskinstance.open', opener, create=True):
        try:
            ti.run()
        except AirflowException:
            pass  # the bash task exits 1 on purpose

    (email, title, body), _ = mock_send_email.call_args
    assert email == 'to'
    assert 'template: test_email_alert_with_config' == title
    assert 'template: test_email_alert_with_config' == body
def test_set_duration(self):
    """set_duration() stores end_date minus start_date in seconds."""
    op = DummyOperator(task_id='op', email='test@test.test')
    instance = TI(task=op)
    instance.start_date = datetime.datetime(2018, 10, 1, 1)
    instance.end_date = datetime.datetime(2018, 10, 1, 2)

    instance.set_duration()

    assert instance.duration == 3600  # exactly one hour
def test_set_duration_empty_dates(self):
    """Without start/end dates, set_duration() leaves duration as None."""
    op = DummyOperator(task_id='op', email='test@test.test')
    instance = TI(task=op)

    instance.set_duration()

    assert instance.duration is None
def test_success_callback_no_race_condition(self, create_task_instance):
    """The state visible inside on_success_callback must already be SUCCESS."""
    callback_wrapper = CallbackWrapper()
    ti = create_task_instance(
        on_success_callback=callback_wrapper.success_handler,
        end_date=DEFAULT_DATE + datetime.timedelta(days=10),
        execution_date=timezone.utcnow(),
        state=State.RUNNING,
    )

    session = settings.Session()
    session.merge(ti)
    session.commit()

    callback_wrapper.wrap_task_instance(ti)
    ti._run_raw_task()
    ti._run_finished_callback()
    assert callback_wrapper.callback_ran
    # The callback must have observed the final state, not a stale RUNNING.
    assert callback_wrapper.task_state_in_callback == State.SUCCESS
    ti.refresh_from_db()
    assert ti.state == State.SUCCESS
@staticmethod
def _test_previous_dates_setup(
    schedule_interval: Union[str, datetime.timedelta, None],
    catchup: bool,
    scenario: List[TaskInstanceState],
    dag_maker,
) -> list:
    """Create one dag run per ``scenario`` entry (one day apart) and return their TIs.

    NOTE: each scenario entry becomes the *DagRun* state; the TI itself is
    always set to SUCCESS below, so state-filtered lookups in the callers
    discriminate on the run state, not the TI state.
    """
    dag_id = 'test_previous_dates'
    with dag_maker(dag_id=dag_id, schedule_interval=schedule_interval, catchup=catchup):
        task = DummyOperator(task_id='task')

    def get_test_ti(execution_date: pendulum.DateTime, state: str) -> TI:
        # One scheduled run per execution_date, keyed by a deterministic run_id.
        dr = dag_maker.create_dagrun(
            run_id=f'test__{execution_date.isoformat()}',
            run_type=DagRunType.SCHEDULED,
            state=state,
            execution_date=execution_date,
            start_date=pendulum.now('UTC'),
        )
        ti = dr.task_instances[0]
        ti.task = task
        ti.set_state(state=State.SUCCESS, session=dag_maker.session)
        return ti

    date = cast(pendulum.DateTime, pendulum.parse('2019-01-01T00:00:00+00:00'))

    ret = []

    for idx, state in enumerate(scenario):
        new_date = date.add(days=idx)
        ti = get_test_ti(new_date, state)
        ret.append(ti)

    return ret
# Shared parametrization for the get_previous_* tests: every combination of
# schedule style (cron / none / timedelta) and catchup on/off.
# NOTE(review): the first cron string carries a trailing space ('0 0 * * * ')
# while the second does not — presumably harmless, but confirm the cron parser
# tolerates it before relying on the distinction.
_prev_dates_param_list = [
    pytest.param('0 0 * * * ', True, id='cron/catchup'),
    pytest.param('0 0 * * *', False, id='cron/no-catchup'),
    pytest.param(None, True, id='no-sched/catchup'),
    pytest.param(None, False, id='no-sched/no-catchup'),
    pytest.param(datetime.timedelta(days=1), True, id='timedelta/catchup'),
    pytest.param(datetime.timedelta(days=1), False, id='timedelta/no-catchup'),
]
@pytest.mark.parametrize("schedule_interval, catchup", _prev_dates_param_list)
def test_previous_ti(self, schedule_interval, catchup, dag_maker) -> None:
    """get_previous_ti() returns the immediately preceding run's TI (any state)."""
    scenario = [State.SUCCESS, State.FAILED, State.SUCCESS]

    ti_list = self._test_previous_dates_setup(schedule_interval, catchup, scenario, dag_maker)

    # First run has no predecessor.
    assert ti_list[0].get_previous_ti() is None

    assert ti_list[2].get_previous_ti().run_id == ti_list[1].run_id

    assert ti_list[2].get_previous_ti().run_id != ti_list[0].run_id
@pytest.mark.parametrize("schedule_interval, catchup", _prev_dates_param_list)
def test_previous_ti_success(self, schedule_interval, catchup, dag_maker) -> None:
    """get_previous_ti(state=SUCCESS) skips runs that did not succeed."""
    scenario = [State.FAILED, State.SUCCESS, State.FAILED, State.SUCCESS]

    ti_list = self._test_previous_dates_setup(schedule_interval, catchup, scenario, dag_maker)

    # Runs 0 and 1 have no successful predecessor; run 3 skips the failed run 2.
    assert ti_list[0].get_previous_ti(state=State.SUCCESS) is None
    assert ti_list[1].get_previous_ti(state=State.SUCCESS) is None
    assert ti_list[3].get_previous_ti(state=State.SUCCESS).run_id == ti_list[1].run_id
    assert ti_list[3].get_previous_ti(state=State.SUCCESS).run_id != ti_list[2].run_id
@pytest.mark.parametrize("schedule_interval, catchup", _prev_dates_param_list)
def test_previous_execution_date_success(self, schedule_interval, catchup, dag_maker) -> None:
    """get_previous_execution_date(state=SUCCESS) mirrors get_previous_ti by date."""
    scenario = [State.FAILED, State.SUCCESS, State.FAILED, State.SUCCESS]

    ti_list = self._test_previous_dates_setup(schedule_interval, catchup, scenario, dag_maker)
    # vivify — touch the attribute on every TI up front so it is populated
    # before the comparisons below.
    for ti in ti_list:
        ti.execution_date

    assert ti_list[0].get_previous_execution_date(state=State.SUCCESS) is None
    assert ti_list[1].get_previous_execution_date(state=State.SUCCESS) is None
    assert ti_list[3].get_previous_execution_date(state=State.SUCCESS) == ti_list[1].execution_date
    assert ti_list[3].get_previous_execution_date(state=State.SUCCESS) != ti_list[2].execution_date
@pytest.mark.parametrize("schedule_interval, catchup", _prev_dates_param_list)
def test_previous_start_date_success(self, schedule_interval, catchup, dag_maker) -> None:
    """get_previous_start_date(state=SUCCESS) returns the prior successful run's start."""
    scenario = [State.FAILED, State.SUCCESS, State.FAILED, State.SUCCESS]

    ti_list = self._test_previous_dates_setup(schedule_interval, catchup, scenario, dag_maker)

    assert ti_list[0].get_previous_start_date(state=State.SUCCESS) is None
    assert ti_list[1].get_previous_start_date(state=State.SUCCESS) is None
    assert ti_list[3].get_previous_start_date(state=State.SUCCESS) == ti_list[1].start_date
    assert ti_list[3].get_previous_start_date(state=State.SUCCESS) != ti_list[2].start_date
def test_get_previous_start_date_none(self, dag_maker):
    """
    Test that get_previous_start_date() can handle TaskInstance with no start_date.
    """
    with dag_maker("test_get_previous_start_date_none", schedule_interval=None) as dag:
        task = DummyOperator(task_id="op")

    day_1 = DEFAULT_DATE
    day_2 = DEFAULT_DATE + datetime.timedelta(days=1)

    # Create a DagRun for day_1 and day_2. Calling ti_2.get_previous_start_date()
    # should return the start_date of ti_1 (which is None because ti_1 was not run).
    # It should not raise an error.
    dagrun_1 = dag_maker.create_dagrun(
        execution_date=day_1,
        state=State.RUNNING,
        run_type=DagRunType.MANUAL,
    )

    dagrun_2 = dag.create_dagrun(
        execution_date=day_2,
        state=State.RUNNING,
        run_type=DagRunType.MANUAL,
    )

    ti_1 = dagrun_1.get_task_instance(task.task_id)
    ti_2 = dagrun_2.get_task_instance(task.task_id)
    ti_1.task = task
    ti_2.task = task

    assert ti_2.get_previous_start_date() == ti_1.start_date
    assert ti_1.start_date is None
def test_pendulum_template_dates(self, create_task_instance):
    """data_interval_start/end in the template context are pendulum.DateTime objects."""
    ti = create_task_instance(
        dag_id='test_pendulum_template_dates',
        task_id='test_pendulum_template_dates_task',
        schedule_interval='0 12 * * *',
    )

    template_context = ti.get_template_context()

    assert isinstance(template_context["data_interval_start"], pendulum.DateTime)
    assert isinstance(template_context["data_interval_end"], pendulum.DateTime)
def test_template_render(self, create_task_instance):
    """render_template resolves dag/task references from the TI's context."""
    ti = create_task_instance(
        dag_id="test_template_render",
        task_id="test_template_render_task",
        schedule_interval="0 12 * * *",
    )
    template_context = ti.get_template_context()
    result = ti.task.render_template("Task: {{ dag.dag_id }} -> {{ task.task_id }}", template_context)
    assert result == "Task: test_template_render -> test_template_render_task"
def test_template_render_deprecated(self, create_task_instance):
    """`execution_date` still renders in templates but emits a deprecation warning."""
    ti = create_task_instance(
        dag_id="test_template_render",
        task_id="test_template_render_task",
        schedule_interval="0 12 * * *",
    )
    template_context = ti.get_template_context()
    with pytest.deprecated_call():
        result = ti.task.render_template("Execution date: {{ execution_date }}", template_context)
    assert result.startswith("Execution date: ")
@pytest.mark.parametrize(
    "content, expected_output",
    [
        ('{{ conn.get("a_connection").host }}', 'hostvalue'),
        ('{{ conn.get("a_connection", "unused_fallback").host }}', 'hostvalue'),
        ('{{ conn.get("missing_connection", {"host": "fallback_host"}).host }}', 'fallback_host'),
        ('{{ conn.a_connection.host }}', 'hostvalue'),
        ('{{ conn.a_connection.login }}', 'loginvalue'),
        ('{{ conn.a_connection.password }}', 'passwordvalue'),
        ('{{ conn.a_connection.extra_dejson["extra__asana__workspace"] }}', 'extra1'),
        ('{{ conn.a_connection.extra_dejson.extra__asana__workspace }}', 'extra1'),
    ],
)
def test_template_with_connection(self, content, expected_output, create_task_instance):
    """
    Test the availability of connections (``conn.*``) in templates, including
    ``conn.get`` fallbacks and ``extra_dejson`` access.
    """
    with create_session() as session:
        # Start from a clean connection table so attribute access is unambiguous.
        clear_db_connections(add_default_connections_back=False)
        merge_conn(
            Connection(
                conn_id="a_connection",
                conn_type="a_type",
                description="a_conn_description",
                host="hostvalue",
                login="loginvalue",
                password="passwordvalue",
                schema="schemavalues",
                extra={
                    "extra__asana__workspace": "extra1",
                },
            ),
            session,
        )

    ti = create_task_instance()

    context = ti.get_template_context()
    result = ti.task.render_template(content, context)
    assert result == expected_output
@pytest.mark.parametrize(
    "content, expected_output",
    [
        ('{{ var.value.a_variable }}', 'a test value'),
        ('{{ var.value.get("a_variable") }}', 'a test value'),
        ('{{ var.value.get("a_variable", "unused_fallback") }}', 'a test value'),
        ('{{ var.value.get("missing_variable", "fallback") }}', 'fallback'),
    ],
)
def test_template_with_variable(self, content, expected_output, create_task_instance):
    """
    Test the availability of variables (``var.value``) in templates,
    including ``get`` with and without a fallback.
    """
    Variable.set('a_variable', 'a test value')

    ti = create_task_instance()
    context = ti.get_template_context()
    result = ti.task.render_template(content, context)
    assert result == expected_output
def test_template_with_variable_missing(self, create_task_instance):
    """``var.value.get`` without a fallback raises KeyError for a missing Variable."""
    ti = create_task_instance()
    context = ti.get_template_context()
    with pytest.raises(KeyError):
        ti.task.render_template('{{ var.value.get("missing_variable") }}', context)
@pytest.mark.parametrize(
    "content, expected_output",
    [
        # var.value renders the raw serialized JSON; var.json deserializes it.
        ('{{ var.value.a_variable }}', '{\n  "a": {\n    "test": "value"\n  }\n}'),
        ('{{ var.json.a_variable["a"]["test"] }}', 'value'),
        ('{{ var.json.get("a_variable")["a"]["test"] }}', 'value'),
        ('{{ var.json.get("a_variable", {"a": {"test": "unused_fallback"}})["a"]["test"] }}', 'value'),
        ('{{ var.json.get("missing_variable", {"a": {"test": "fallback"}})["a"]["test"] }}', 'fallback'),
    ],
)
def test_template_with_json_variable(self, content, expected_output, create_task_instance):
    """
    Test the availability of JSON variables (``var.json``) in templates.
    """
    Variable.set('a_variable', {'a': {'test': 'value'}}, serialize_json=True)

    ti = create_task_instance()
    context = ti.get_template_context()
    result = ti.task.render_template(content, context)
    assert result == expected_output
def test_template_with_json_variable_missing(self, create_task_instance):
    """``var.json.get`` without a fallback raises KeyError for a missing Variable."""
    ti = create_task_instance()
    context = ti.get_template_context()
    with pytest.raises(KeyError):
        ti.task.render_template('{{ var.json.get("missing_variable") }}', context)
@pytest.mark.parametrize(
    ("field", "expected"),
    [
        ("next_ds", "2016-01-01"),
        ("next_ds_nodash", "20160101"),
        ("prev_ds", "2015-12-31"),
        ("prev_ds_nodash", "20151231"),
        ("yesterday_ds", "2015-12-31"),
        ("yesterday_ds_nodash", "20151231"),
        ("tomorrow_ds", "2016-01-02"),
        ("tomorrow_ds_nodash", "20160102"),
    ],
)
def test_deprecated_context(self, field, expected, create_task_instance):
    """Deprecated context keys still resolve correctly but warn exactly once."""
    ti = create_task_instance(execution_date=DEFAULT_DATE)
    context = ti.get_template_context()
    with pytest.deprecated_call() as recorder:
        assert context[field] == expected
    message_beginning = (
        f"Accessing {field!r} from the template is deprecated and "
        f"will be removed in a future version."
    )

    recorded_message = [str(m.message) for m in recorder]
    assert len(recorded_message) == 1
    assert recorded_message[0].startswith(message_beginning)
def test_template_with_custom_timetable_deprecated_context(self, create_task_instance):
    """Deprecated context keys derived from a custom timetable's data interval.

    The prev_* keys are None because a custom timetable has no meaningful
    "previous schedule" to compute them from.
    """
    ti = create_task_instance(
        start_date=DEFAULT_DATE,
        timetable=AfterWorkdayTimetable(),
        run_type=DagRunType.SCHEDULED,
        execution_date=timezone.datetime(2021, 9, 6),
        data_interval=(timezone.datetime(2021, 9, 6), timezone.datetime(2021, 9, 7)),
    )
    context = ti.get_template_context()
    with pytest.deprecated_call():
        assert context["execution_date"] == pendulum.DateTime(2021, 9, 6, tzinfo=timezone.TIMEZONE)
    with pytest.deprecated_call():
        assert context["next_ds"] == "2021-09-07"
    with pytest.deprecated_call():
        assert context["next_ds_nodash"] == "20210907"
    with pytest.deprecated_call():
        assert context["next_execution_date"] == pendulum.DateTime(2021, 9, 7, tzinfo=timezone.TIMEZONE)
    with pytest.deprecated_call():
        assert context["prev_ds"] is None, "Does not make sense for custom timetable"
    with pytest.deprecated_call():
        assert context["prev_ds_nodash"] is None, "Does not make sense for custom timetable"
    with pytest.deprecated_call():
        assert context["prev_execution_date"] is None, "Does not make sense for custom timetable"
def test_execute_callback(self, create_task_instance):
    """on_execute_callback fires with the TI's context before the task executes."""
    called = False

    def on_execute_callable(context):
        nonlocal called
        called = True
        # Must see the dag this TI actually belongs to.  (Previously this
        # compared against 'test_dagrun_execute_callback', which never matched
        # the dag_id used below — the AssertionError raised inside the callback
        # is swallowed by the callback error handling, so the check was
        # silently ineffective.)
        assert context['dag_run'].dag_id == 'test_execute_callback'

    ti = create_task_instance(
        dag_id='test_execute_callback',
        on_execute_callback=on_execute_callable,
        state=State.RUNNING,
    )

    session = settings.Session()

    session.merge(ti)
    session.commit()

    ti._run_raw_task()
    assert called
    ti.refresh_from_db()
    assert ti.state == State.SUCCESS
@pytest.mark.parametrize(
    "finished_state, expected_message",
    [
        (State.SUCCESS, "Error when executing on_success_callback"),
        (State.UP_FOR_RETRY, "Error when executing on_retry_callback"),
        (State.FAILED, "Error when executing on_failure_callback"),
    ],
)
def test_finished_callbacks_handle_and_log_exception(
    self, finished_state, expected_message, create_task_instance
):
    """A raising finished-state callback is logged, not propagated."""
    called = completed = False

    def on_finish_callable(context):
        nonlocal called, completed
        called = True
        raise KeyError
        completed = True  # deliberately unreachable: proves the raise aborted the callback

    ti = create_task_instance(
        end_date=DEFAULT_DATE + datetime.timedelta(days=10),
        on_success_callback=on_finish_callable,
        on_retry_callback=on_finish_callable,
        on_failure_callback=on_finish_callable,
        state=finished_state,
    )
    ti._log = mock.Mock()  # capture logging instead of emitting it
    ti._run_finished_callback()

    assert called
    assert not completed
    ti.log.exception.assert_called_once_with(expected_message)
@provide_session
def test_handle_failure(self, create_dummy_dag, session=None):
    """handle_failure() routes to on_failure vs on_retry callbacks correctly."""
    start_date = timezone.datetime(2016, 6, 1)
    clear_db_runs()

    # Case 1: no retries -> on_failure_callback fires, on_retry does not.
    mock_on_failure_1 = mock.MagicMock()
    mock_on_retry_1 = mock.MagicMock()
    dag, task1 = create_dummy_dag(
        dag_id="test_handle_failure",
        schedule_interval=None,
        start_date=start_date,
        task_id="test_handle_failure_on_failure",
        with_dagrun_type=DagRunType.MANUAL,
        on_failure_callback=mock_on_failure_1,
        on_retry_callback=mock_on_retry_1,
        session=session,
    )
    dr = dag.create_dagrun(
        run_id="test2",
        run_type=DagRunType.MANUAL,
        execution_date=timezone.utcnow(),
        state=None,
        session=session,
    )

    ti1 = dr.get_task_instance(task1.task_id, session=session)
    ti1.task = task1
    ti1.state = State.FAILED
    ti1.handle_failure("test failure handling")
    ti1._run_finished_callback()

    context_arg_1 = mock_on_failure_1.call_args[0][0]
    assert context_arg_1 and "task_instance" in context_arg_1
    mock_on_retry_1.assert_not_called()

    # Case 2: retries remain -> on_retry_callback fires instead of on_failure.
    mock_on_failure_2 = mock.MagicMock()
    mock_on_retry_2 = mock.MagicMock()
    task2 = DummyOperator(
        task_id="test_handle_failure_on_retry",
        on_failure_callback=mock_on_failure_2,
        on_retry_callback=mock_on_retry_2,
        retries=1,
        dag=dag,
    )
    ti2 = TI(task=task2, run_id=dr.run_id)
    ti2.state = State.FAILED
    session.add(ti2)
    session.flush()
    ti2.handle_failure("test retry handling")
    ti2._run_finished_callback()

    mock_on_failure_2.assert_not_called()

    context_arg_2 = mock_on_retry_2.call_args[0][0]
    assert context_arg_2 and "task_instance" in context_arg_2

    # test the scenario where normally we would retry but have been asked to fail
    mock_on_failure_3 = mock.MagicMock()
    mock_on_retry_3 = mock.MagicMock()
    task3 = DummyOperator(
        task_id="test_handle_failure_on_force_fail",
        on_failure_callback=mock_on_failure_3,
        on_retry_callback=mock_on_retry_3,
        retries=1,
        dag=dag,
    )
    ti3 = TI(task=task3, run_id=dr.run_id)
    session.add(ti3)
    session.flush()
    ti3.state = State.FAILED
    ti3.handle_failure("test force_fail handling", force_fail=True)
    ti3._run_finished_callback()

    context_arg_3 = mock_on_failure_3.call_args[0][0]
    assert context_arg_3 and "task_instance" in context_arg_3
    mock_on_retry_3.assert_not_called()
def test_handle_failure_updates_queued_task_try_number(self, dag_maker):
    """Failing a QUEUED (never-started) TI still bumps its try number."""
    session = settings.Session()
    with dag_maker():
        task = DummyOperator(task_id="mytask", retries=1)
    dr = dag_maker.create_dagrun()
    ti = TI(task=task, run_id=dr.run_id)
    ti.state = State.QUEUED
    session.merge(ti)
    session.commit()
    assert ti.state == State.QUEUED
    assert ti.try_number == 1
    ti.handle_failure("test queued ti", test_mode=True)
    assert ti.state == State.UP_FOR_RETRY  # retries remain, so retry rather than fail
    # Assert that 'ti._try_number' is bumped from 0 to 1. This is the last/current try
    assert ti._try_number == 1
    # Check 'ti.try_number' is bumped to 2. This is try_number for next run
    assert ti.try_number == 2
def test_does_not_retry_on_airflow_fail_exception(self, dag_maker):
    """AirflowFailException must fail the TI immediately even when retries remain."""

    def fail():
        raise AirflowFailException("hopeless")

    with dag_maker(dag_id='test_does_not_retry_on_airflow_fail_exception'):
        task = PythonOperator(
            task_id='test_raise_airflow_fail_exception',
            python_callable=fail,
            retries=1,
        )
    ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
    ti.task = task
    try:
        ti.run()
    except AirflowFailException:
        pass  # expected
    assert State.FAILED == ti.state
def test_retries_on_other_exceptions(self, dag_maker):
    """A plain AirflowException with retries configured goes to UP_FOR_RETRY."""

    def fail():
        raise AirflowException("maybe this will pass?")

    with dag_maker(dag_id='test_retries_on_other_exceptions'):
        task = PythonOperator(
            task_id='test_raise_other_exception',
            python_callable=fail,
            retries=1,
        )
    ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]
    ti.task = task
    try:
        ti.run()
    except AirflowException:
        pass  # expected
    assert State.UP_FOR_RETRY == ti.state
def _env_var_check_callback(self):
    """Python-callable helper: asserts the AIRFLOW_CTX_* env vars are exported."""
    assert 'test_echo_env_variables' == os.environ['AIRFLOW_CTX_DAG_ID']
    assert 'hive_in_python_op' == os.environ['AIRFLOW_CTX_TASK_ID']
    assert DEFAULT_DATE.isoformat() == os.environ['AIRFLOW_CTX_EXECUTION_DATE']
    assert DagRun.generate_run_id(DagRunType.MANUAL, DEFAULT_DATE) == os.environ['AIRFLOW_CTX_DAG_RUN_ID']
def test_echo_env_variables(self, dag_maker):
    """Running a task exports the AIRFLOW_CTX_* env vars (verified by
    the ``_env_var_check_callback`` helper used as the python_callable)."""
    with dag_maker(
        'test_echo_env_variables',
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE + datetime.timedelta(days=10),
    ):
        op = PythonOperator(task_id='hive_in_python_op', python_callable=self._env_var_check_callback)
    dr = dag_maker.create_dagrun(
        run_type=DagRunType.MANUAL,
        external_trigger=False,
    )
    ti = TI(task=op, run_id=dr.run_id)
    ti.state = State.RUNNING
    session = settings.Session()
    session.merge(ti)
    session.commit()
    ti._run_raw_task()
    ti.refresh_from_db()
    # SUCCESS implies the callback's assertions about the env vars all passed.
    assert ti.state == State.SUCCESS
@patch.object(Stats, 'incr')
def test_task_stats(self, stats_mock, create_task_instance):
    """A raw-task run emits ti.start and ti.finish metrics (4 incr calls total)."""
    ti = create_task_instance(
        dag_id='test_task_start_end_stats',
        end_date=DEFAULT_DATE + datetime.timedelta(days=10),
        state=State.RUNNING,
    )
    stats_mock.reset_mock()  # discard incr calls made during setup

    session = settings.Session()
    session.merge(ti)
    session.commit()
    ti._run_raw_task()
    ti.refresh_from_db()
    stats_mock.assert_called_with(f'ti.finish.{ti.dag_id}.{ti.task_id}.{ti.state}')
    assert call(f'ti.start.{ti.dag_id}.{ti.task_id}') in stats_mock.mock_calls
    assert stats_mock.call_count == 4
def test_command_as_list(self, create_task_instance):
    """command_as_list rewrites the dag file location relative to DAGS_FOLDER."""
    ti = create_task_instance()
    ti.task.dag.fileloc = os.path.join(TEST_DAGS_FOLDER, 'x.py')
    assert ti.command_as_list() == [
        'airflow',
        'tasks',
        'run',
        ti.dag_id,
        ti.task_id,
        ti.run_id,
        '--subdir',
        'DAGS_FOLDER/x.py',
    ]
def test_generate_command_default_param(self):
    """With defaults only, generate_command yields the minimal CLI invocation."""
    dag_id = 'test_generate_command_default_param'
    task_id = 'task'
    expected = ['airflow', 'tasks', 'run', dag_id, task_id, 'run_1']
    actual = TI.generate_command(dag_id=dag_id, task_id=task_id, run_id='run_1')
    assert expected == actual
def test_generate_command_specific_param(self):
    """Optional flags such as mark_success are appended to the CLI invocation."""
    dag_id = 'test_generate_command_specific_param'
    task_id = 'task'
    expected = [
        'airflow',
        'tasks',
        'run',
        dag_id,
        task_id,
        'run_1',
        '--mark-success',
    ]
    actual = TI.generate_command(
        dag_id=dag_id, task_id=task_id, run_id='run_1', mark_success=True
    )
    assert expected == actual
@provide_session
def test_get_rendered_template_fields(self, dag_maker, session=None):
    """Rendered template fields persisted in RTIF are retrievable by another TI."""
    with dag_maker('test-dag', session=session) as dag:
        task = BashOperator(task_id='op1', bash_command="{{ task.task_id }}")
    dag.fileloc = TEST_DAGS_FOLDER + '/test_get_k8s_pod_yaml.py'
    ti = dag_maker.create_dagrun().task_instances[0]
    ti.task = task

    # Persist the rendered fields for the original TI.
    session.add(RenderedTaskInstanceFields(ti))
    session.flush()

    # Create new TI for the same Task
    new_task = BashOperator(task_id='op12', bash_command="{{ task.task_id }}", dag=dag)

    new_ti = TI(task=new_task, run_id=ti.run_id)
    new_ti.get_rendered_template_fields(session=session)

    # '{{ task.task_id }}' was rendered to 'op1' when the RTIF row was built above.
    assert "op1" == ti.task.bash_command

    # CleanUp
    with create_session() as session:
        session.query(RenderedTaskInstanceFields).delete()
@mock.patch.dict(os.environ, {"AIRFLOW_IS_K8S_EXECUTOR_POD": "True"})
@mock.patch("airflow.settings.pod_mutation_hook")
def test_render_k8s_pod_yaml(self, pod_mutation_hook, create_task_instance):
    """render_k8s_pod_yaml builds the expected pod spec and runs the mutation hook."""
    ti = create_task_instance(
        dag_id='test_render_k8s_pod_yaml',
        run_id='test_run_id',
        task_id='op1',
        execution_date=DEFAULT_DATE,
    )

    expected_pod_spec = {
        'metadata': {
            'annotations': {
                'dag_id': 'test_render_k8s_pod_yaml',
                'execution_date': '2016-01-01T00:00:00+00:00',
                'task_id': 'op1',
                'try_number': '1',
            },
            'labels': {
                'airflow-worker': '0',
                'airflow_version': version,
                'dag_id': 'test_render_k8s_pod_yaml',
                'execution_date': '2016-01-01T00_00_00_plus_00_00',
                'kubernetes_executor': 'True',
                'task_id': 'op1',
                'try_number': '1',
            },
            'name': mock.ANY,  # pod name carries a generated suffix
            'namespace': 'default',
        },
        'spec': {
            'containers': [
                {
                    'args': [
                        'airflow',
                        'tasks',
                        'run',
                        'test_render_k8s_pod_yaml',
                        'op1',
                        'test_run_id',
                        '--subdir',
                        __file__,
                    ],
                    'name': 'base',
                    'env': [{'name': 'AIRFLOW_IS_K8S_EXECUTOR_POD', 'value': 'True'}],
                }
            ]
        },
    }

    assert ti.render_k8s_pod_yaml() == expected_pod_spec
    pod_mutation_hook.assert_called_once_with(mock.ANY)
@mock.patch.dict(os.environ, {"AIRFLOW_IS_K8S_EXECUTOR_POD": "True"})
@mock.patch.object(RenderedTaskInstanceFields, 'get_k8s_pod_yaml')
def test_get_rendered_k8s_spec(self, rtif_get_k8s_pod_yaml, create_task_instance):
    """get_rendered_k8s_spec prefers the stored copy; renders only on a miss."""
    # Create new TI for the same Task
    ti = create_task_instance()

    patcher = mock.patch.object(ti, 'render_k8s_pod_yaml', autospec=True)

    fake_spec = {"ermagawds": "pods"}

    session = mock.Mock()

    with patcher as render_k8s_pod_yaml:
        # Stored spec found: no rendering should happen.
        rtif_get_k8s_pod_yaml.return_value = fake_spec
        assert ti.get_rendered_k8s_spec(session) == fake_spec

        rtif_get_k8s_pod_yaml.assert_called_once_with(ti, session=session)
        render_k8s_pod_yaml.assert_not_called()

        # Now test that when we _dont_ find it in the DB, it calls render_k8s_pod_yaml
        rtif_get_k8s_pod_yaml.return_value = None
        render_k8s_pod_yaml.return_value = fake_spec

        assert ti.get_rendered_k8s_spec(session) == fake_spec

        render_k8s_pod_yaml.assert_called_once()
def test_set_state_up_for_retry(self, create_task_instance):
    """RUNNING -> UP_FOR_RETRY sets end_date and duration but keeps start_date."""
    ti = create_task_instance(state=State.RUNNING)

    start_date = timezone.utcnow()
    ti.start_date = start_date

    ti.set_state(State.UP_FOR_RETRY)
    assert ti.state == State.UP_FOR_RETRY
    assert ti.start_date == start_date, "Start date should have been left alone"
    assert ti.start_date < ti.end_date
    assert ti.duration > 0
def test_refresh_from_db(self, create_task_instance):
    """refresh_from_db must reload every non-foreign TaskInstance column."""
    run_date = timezone.utcnow()

    # One distinctive value per TI column, so a column silently skipped by
    # refresh_from_db shows up as a mismatch below.
    expected_values = {
        "task_id": "test_refresh_from_db_task",
        "dag_id": "test_refresh_from_db_dag",
        "run_id": "test",
        "map_index": -1,
        "start_date": run_date + datetime.timedelta(days=1),
        "end_date": run_date + datetime.timedelta(days=1, seconds=1, milliseconds=234),
        "duration": 1.234,
        "state": State.SUCCESS,
        "_try_number": 1,
        "max_tries": 1,
        "hostname": "some_unique_hostname",
        "unixname": "some_unique_unixname",
        "job_id": 1234,
        "pool": "some_fake_pool_id",
        "pool_slots": 25,
        "queue": "some_queue_id",
        "priority_weight": 123,
        "operator": "some_custom_operator",
        "queued_dttm": run_date + datetime.timedelta(hours=1),
        "queued_by_job_id": 321,
        "pid": 123,
        "executor_config": {"Some": {"extra": "information"}},
        "external_executor_id": "some_executor_id",
        "trigger_timeout": None,
        "trigger_id": None,
        "next_kwargs": None,
        "next_method": None,
    }
    # Make sure we aren't missing any new value in our expected_values list.
    expected_keys = {f"task_instance.{key.lstrip('_')}" for key in expected_values}
    assert {str(c) for c in TI.__table__.columns} == expected_keys, (
        "Please add all non-foreign values of TaskInstance to this list. "
        "This prevents refresh_from_db() from missing a field."
    )

    # Persist a TI carrying all the distinctive values.
    ti = create_task_instance(task_id=expected_values['task_id'], dag_id=expected_values['dag_id'])
    for key, expected_value in expected_values.items():
        setattr(ti, key, expected_value)
    with create_session() as session:
        session.merge(ti)
        session.commit()

    mock_task = mock.MagicMock()
    mock_task.task_id = expected_values["task_id"]
    mock_task.dag_id = expected_values["dag_id"]

    # A fresh TI for the same task/run must pick everything up from the DB.
    ti = TI(task=mock_task, run_id="test")
    ti.refresh_from_db()
    for key, expected_value in expected_values.items():
        assert hasattr(ti, key), f"Key {key} is missing in the TaskInstance."
        assert (
            getattr(ti, key) == expected_value
        ), f"Key: {key} had different values. Make sure it loads it in the refresh refresh_from_db()"
def test_operator_field_with_serialization(self, create_task_instance):
    """ti.operator reports the task class name both before and after serialization."""
    ti = create_task_instance()
    assert ti.task.task_type == 'DummyOperator'

    # Verify that ti.operator field renders correctly "without" Serialization
    assert ti.operator == "DummyOperator"

    serialized_op = SerializedBaseOperator.serialize_operator(ti.task)
    deserialized_op = SerializedBaseOperator.deserialize_operator(serialized_op)
    assert deserialized_op.task_type == 'DummyOperator'
    # Verify that ti.operator field renders correctly "with" Serialization
    ser_ti = TI(task=deserialized_op, run_id=None)
    assert ser_ti.operator == "DummyOperator"
@pytest.mark.parametrize("pool_override", [None, "test_pool2"])
def test_refresh_from_task(pool_override):
    """refresh_from_task copies scheduling attributes; pool_override beats task.pool."""
    task = DummyOperator(
        task_id="dummy",
        queue="test_queue",
        pool="test_pool1",
        pool_slots=3,
        priority_weight=10,
        run_as_user="test",
        retries=30,
        executor_config={"KubernetesExecutor": {"image": "myCustomDockerImage"}},
    )
    ti = TI(task, run_id=None)
    ti.refresh_from_task(task, pool_override=pool_override)

    assert ti.queue == task.queue

    if pool_override:
        assert ti.pool == pool_override
    else:
        assert ti.pool == task.pool

    assert ti.pool_slots == task.pool_slots
    assert ti.priority_weight == task.priority_weight_total
    assert ti.run_as_user == task.run_as_user
    assert ti.max_tries == task.retries
    assert ti.executor_config == task.executor_config
    assert ti.operator == DummyOperator.__name__
class TestRunRawTaskQueriesCount:
    """
    These tests are designed to detect changes in the number of queries executed
    when calling _run_raw_task
    """

    @staticmethod
    def _clean():
        # Wipe every table these tests can touch so counts start from a known state.
        db.clear_db_runs()
        db.clear_db_pools()
        db.clear_db_dags()
        db.clear_db_sla_miss()
        db.clear_db_import_errors()

    def setup_method(self) -> None:
        self._clean()

    def teardown_method(self) -> None:
        self._clean()

    @pytest.mark.parametrize("expected_query_count, mark_success", [(12, False), (5, True)])
    @provide_session
    def test_execute_queries_count(
        self, expected_query_count, mark_success, create_task_instance, session=None
    ):
        """Query count of a raw-task run, with and without mark_success."""
        ti = create_task_instance(session=session, state=State.RUNNING)

        assert ti.dag_run

        # an extra query is fired in RenderedTaskInstanceFields.delete_old_records
        # for other DBs. delete_old_records is called only when mark_success is False
        expected_query_count_based_on_db = (
            expected_query_count + 1
            if session.bind.dialect.name == "mssql" and expected_query_count > 0 and not mark_success
            else expected_query_count
        )

        session.flush()

        with assert_queries_count(expected_query_count_based_on_db):
            ti._run_raw_task(mark_success=mark_success, session=session)

    @provide_session
    def test_execute_queries_count_store_serialized(self, create_task_instance, session=None):
        """Query count of a raw-task run for a store-serialized dag."""
        ti = create_task_instance(session=session, state=State.RUNNING)

        assert ti.dag_run

        # an extra query is fired in RenderedTaskInstanceFields.delete_old_records
        # for other DBs
        expected_query_count_based_on_db = 5

        session.flush()

        with assert_queries_count(expected_query_count_based_on_db):
            # NOTE(review): `session` is passed positionally here, so it binds to
            # _run_raw_task's first parameter rather than `session=` — presumably
            # `session=session` was intended; confirm against the current
            # TaskInstance._run_raw_task signature (the asserted count of 5 may
            # depend on the current behavior).
            ti._run_raw_task(session)
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
@pytest.mark.parametrize("retries", [0, 1])
def test_sensor_timeout(mode, retries, dag_maker):
    """
    Test that AirflowSensorTimeout does not cause sensor to retry.
    """

    def timeout():
        raise AirflowSensorTimeout

    mock_on_failure = mock.MagicMock()
    with dag_maker(dag_id=f'test_sensor_timeout_{mode}_{retries}'):
        PythonSensor(
            task_id='test_raise_sensor_timeout',
            python_callable=timeout,
            on_failure_callback=mock_on_failure,
            retries=retries,
            mode=mode,
        )
    ti = dag_maker.create_dagrun(execution_date=timezone.utcnow()).task_instances[0]

    with pytest.raises(AirflowSensorTimeout):
        ti.run()

    # Even with retries configured the sensor goes straight to FAILED.
    assert mock_on_failure.called
    assert ti.state == State.FAILED
class TestTaskInstanceRecordTaskMapXComPush:
    """Test TI.xcom_push() correctly records return values for task-mapping."""

    def setup_class(self):
        """Ensure we start fresh."""
        with create_session() as session:
            session.query(TaskMap).delete()

    def _run_ti_with_faked_mapped_dependants(self, ti):
        """Run *ti* while pretending a downstream mapped task consumes its XCom."""
        # TODO: We can't actually put a MappedOperator in a DAG yet due to it
        # lacking some functions we expect from BaseOperator, so we mock this
        # instead to test what effect it has to TaskMap recording.
        with mock.patch.object(ti.task, "has_mapped_dependants", new=lambda: True):
            ti.run()

    @pytest.mark.parametrize("xcom_value", [[1, 2, 3], {"a": 1, "b": 2}, "abc"])
    def test_not_recorded_for_unused(self, dag_maker, xcom_value):
        """A value not used for task-mapping should not be recorded."""
        with dag_maker(dag_id="test_not_recorded_for_unused") as dag:

            @dag.task()
            def push_something():
                return xcom_value

            push_something()

        ti = dag_maker.create_dagrun().task_instances[0]
        ti.run()
        assert dag_maker.session.query(TaskMap).count() == 0

    def test_error_if_unmappable(self, caplog, dag_maker):
        """If an unmappable return value is used to map, fail the task that pushed the XCom."""
        # Use this test's own dag_id (previously copy-pasted from
        # test_not_recorded_for_unused) so failures are attributable.
        with dag_maker(dag_id="test_error_if_unmappable") as dag:

            @dag.task()
            def push_something():
                return "abc"  # a str cannot be used to map a downstream task

            push_something()

        ti = dag_maker.create_dagrun().task_instances[0]
        with pytest.raises(UnmappableXComPushed) as ctx:
            self._run_ti_with_faked_mapped_dependants(ti)

        assert dag_maker.session.query(TaskMap).count() == 0
        assert ti.state == TaskInstanceState.FAILED
        assert str(ctx.value) == "unmappable return type 'str'"

    @pytest.mark.parametrize(
        "xcom_value, expected_length, expected_keys",
        [
            ([1, 2, 3], 3, None),
            ({"a": 1, "b": 2}, 2, ["a", "b"]),
        ],
    )
    def test_written_task_map(self, dag_maker, xcom_value, expected_length, expected_keys):
        """Return value should be recorded in TaskMap if it's used by a downstream to map."""
        with dag_maker(dag_id="test_written_task_map") as dag:

            @dag.task()
            def push_something():
                return xcom_value

            push_something()

        dag_run = dag_maker.create_dagrun()
        ti = next(ti for ti in dag_run.task_instances if ti.task_id == "push_something")
        self._run_ti_with_faked_mapped_dependants(ti)

        task_map = dag_maker.session.query(TaskMap).one()
        assert task_map.dag_id == "test_written_task_map"
        assert task_map.task_id == "push_something"
        assert task_map.run_id == dag_run.run_id
        assert task_map.map_index == -1
        assert task_map.length == expected_length
        assert task_map.keys == expected_keys
| {
"content_hash": "565584d996989b783e13d65a299900c4",
"timestamp": "",
"source": "github",
"line_count": 2304,
"max_line_length": 110,
"avg_line_length": 37.39366319444444,
"alnum_prop": 0.5826011258777785,
"repo_name": "mistercrunch/airflow",
"id": "e11594f75fcc0d171281f7ed0217020aec5637df",
"size": "86943",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/models/test_taskinstance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
from sh import Command
from functools import wraps
def kwargs_spec(command, arguments, schema):
    """Decorator factory: validate a salt-style kwargs dict against *schema*,
    normalise '--long-option' keys to Python identifiers, and expose the
    resulting shell command string as ``cmd`` in the wrapped function's
    module globals before calling it.

    :param command: executable name handed to ``sh.Command``
    :param arguments: positional argument(s) baked onto the command
    :param schema: callable validating/normalising the kwargs dict
    """
    def kwargs_spec_decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            kwargs = schema(kwargs)
            # Iterate over a snapshot: deleting from a dict while iterating
            # its live items() view raises RuntimeError on Python 3.
            for k, v in list(kwargs.items()):
                if k.startswith('--'):
                    del kwargs[k]
                    # '--dry-run' -> 'dry_run'
                    kwargs[k[2:].replace('-', '_')] = v
            f.__globals__['cmd'] = str(Command(command).bake(**kwargs).bake(arguments))
            return f(*args, **kwargs)
        # Record the schema so introspection tools can find it.
        setattr(wrapper, '__annotations__', {'kwargs': schema})
        return wrapper
    return kwargs_spec_decorator
"content_hash": "49db9f073ba37e0e08229b4ccfa79035",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 37.470588235294116,
"alnum_prop": 0.5416012558869702,
"repo_name": "rominf/salt_kwargs",
"id": "e0b613d98d3c4e23e9fd8691ff3bf25d8c218e18",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt_kwargs_cmd.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1315"
}
],
"symlink_target": ""
} |
""" fourch (stylised as 4ch) is an easy-to-implement Python wrapper for
4chan's JSON API, as provided by moot.
It uses the documentation of the 4chan API located at:
https://github.com/4chan/4chan-API
This is based off of the API last updated Aug 12, 2014.
(4chan-API commit: 1b2bc7858afc555127b8911b4d760480769872a9)
"""
from ._version import __version__
from .fourch import urls
from .thread import Thread
from .board import Board
from .reply import Reply
import requests
def boards(https=False):
    """ Get a list of all boards on 4chan, in :class:`fourch.board.Board`
        objects.

        :param https: Should we use HTTPS or HTTP?
        :type https: bool
    """
    session = requests.Session()
    session.headers.update({
        "User-Agent": "fourch/{0} (@https://github.com/sysr-q/4ch)".format(
            __version__
        )
    })
    scheme = "https://" if https else "http://"
    response = session.get(scheme + urls['api'] + urls["api_boards"])
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
    # One Board wrapper per board short-name in the JSON payload.
    return [Board(entry['board'], https=https)
            for entry in response.json()['boards']]
| {
"content_hash": "e5eba43a1ddaea2a8c1ca7cfdad9c229",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 27.976744186046513,
"alnum_prop": 0.6309226932668329,
"repo_name": "sysr-q/4ch",
"id": "ba1306d0fab01909d5a16f52cd61d53c5681ea46",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fourch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25148"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relax Order.rfidcard: the FK becomes optional (blank/null allowed)
    # and is detached (SET_NULL) when the referenced RfidCard is deleted,
    # so deleting a card no longer cascades away its orders.

    dependencies = [
        ('billing', '0008_order_rfidcard'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='rfidcard',
            field=models.ForeignKey(verbose_name='rfid card', blank=True, to='billing.RfidCard', null=True, on_delete=models.SET_NULL),
        ),
    ]
| {
"content_hash": "a4543aa896e04b7142e25707b149748d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 135,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6195652173913043,
"repo_name": "Inter-Actief/alexia",
"id": "83bef29202a18a934b82e5382265fb9e871c8b55",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alexia/apps/billing/migrations/0009_alter_order_rfidcard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17029"
},
{
"name": "HTML",
"bytes": "179103"
},
{
"name": "JavaScript",
"bytes": "511580"
},
{
"name": "Python",
"bytes": "372488"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import TestView
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10 -- this module targets the pre-1.8 API; confirm the
# project's pinned Django version before modernising to a plain list.
urlpatterns = patterns(
    '',
    # Catch-all: r'^' matches every request path and routes it to TestView.
    url(r'^', TestView.as_view()),
)
| {
"content_hash": "d093a08cb01bd446c77d29c444dba450",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 17.75,
"alnum_prop": 0.6690140845070423,
"repo_name": "arsham/django-cachedpaginator",
"id": "1340b70d53a9631bbf29c58c5a9620b392a4b836",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cachedpaginator/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "470"
},
{
"name": "Python",
"bytes": "15074"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
# Subdirectory (under the project's MEDIA_ROOT/MEDIA_URL) that the file
# manager operates on; each setting below can be overridden in settings.py.
DIRECTORY = getattr(settings, 'FILEMANAGER_DIRECTORY', 'uploads')
# Filesystem location where managed files are stored.
MEDIA_ROOT = getattr(settings, "FILEMANAGER_MEDIA_ROOT", os.path.join(settings.MEDIA_ROOT, DIRECTORY))
# NOTE(review): os.path.join on a URL uses the OS path separator -- on
# Windows this would produce backslashes in the URL; confirm deployment
# targets POSIX hosts only.
MEDIA_URL = getattr(settings, "FILEMANAGER_MEDIA_URL", os.path.join(settings.MEDIA_URL, DIRECTORY))
# Storage backend; defaults to plain filesystem storage rooted above.
STORAGE = getattr(settings, "FILEMANAGER_STORAGE", FileSystemStorage(location=MEDIA_ROOT, base_url=MEDIA_URL))
| {
"content_hash": "b09bf7a6a3b06a9e55f85862e8377fa6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 110,
"avg_line_length": 37.30769230769231,
"alnum_prop": 0.7876288659793814,
"repo_name": "byteweaver/django-filemanager",
"id": "9375c2de71c3a8fcaf8c777eb0bbed4e924b13d3",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filemanager/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "130"
},
{
"name": "HTML",
"bytes": "4515"
},
{
"name": "JavaScript",
"bytes": "72386"
},
{
"name": "Python",
"bytes": "13242"
}
],
"symlink_target": ""
} |
from panda3d.core import *
from utils import *
from objparser import *
class ObjLoader(ObjListener):
def __init__(self, nodeName):
ObjListener.__init__(self)
self.vdataFormat = GeomVertexFormat.getV3n3t2()
self.node = PandaNode(nodeName)
self.triangles = None
self.object("object")
def object(self, name):
print "object:", name
self.objectName = name
self.vertices = []
self.texcoords = []
self.normals = []
self.vertexCopies = []
self.copyIndices = dict()
self.reverseCopyIndices = dict()
self.vdata = GeomVertexData(name, self.vdataFormat, Geom.UHDynamic)
self.vdataVertex = None
self.vdataTexcoord = None
self.vdataNormal = None
self.faceCount = 0
self.group(name)
def group(self, name):
print "group:", name
self.finishGroup()
self.groupName = name
self.geometry = None
self.triangles = None
def vertex(self, values):
self.vertices.append([values[0], -values[2], values[1]])
self.vertexCopies.append([])
def vertexTexture(self, values):
self.texcoords.append(values)
def vertexNormal(self, values):
self.normals.append(values)
def face(self, values):
indices = []
generateNormals = 0
for v in values:
key = str(v) + "/" + str(self.faceCount)
if key in self.copyIndices:
indices.append(self.copyIndices[key])
else:
copyIndex = self.copyIndices.setdefault(key, len(self.copyIndices))
indices.append(copyIndex)
index = v[0] - 1
self.reverseCopyIndices[copyIndex] = index
self.vertexCopies[index].append(copyIndex)
n = len(v)
generateNormals += 1
if 1 <= n:
self.ensureVdataVertex()
addData(self.vdataVertex, self.vertices[index])
if 2 <= n:
self.ensureVdataTexcoord()
addData(self.vdataTexcoord, self.texcoords[v[1] - 1])
if 3 == n:
--generateNormals
self.ensureVdataNormal()
addData(self.vdataNormal, self.normals[v[2] - 1])
if n < 1 or 3 < n:
raise Exception("Invalid vector size: %d" % n)
n = len(indices)
normal = Vec3(0.0, 1.0, 0.0)
if 3 <= n:
self.ensureTriangles()
for i in range(2, n):
self.triangles.addVertex(indices[0])
self.triangles.addVertex(indices[i - 1])
self.triangles.addVertex(indices[i])
if 0 < generateNormals:
a = vec3(self.vertices[self.reverseCopyIndices[indices[0]]])
b = vec3(self.vertices[self.reverseCopyIndices[indices[1]]])
c = vec3(self.vertices[self.reverseCopyIndices[indices[2]]])
normal = Vec3(b - a).cross(Vec3(c - a))
normal.normalize()
for i in range(generateNormals):
self.ensureVdataNormal()
addData(self.vdataNormal, normal)
self.faceCount += 1
def finishGroup(self):
if not self.triangles is None:
self.triangles.closePrimitive()
self.ensureGeometry()
self.geometry.addPrimitive(self.triangles)
geomNode = GeomNode(self.groupName)
geomNode.addGeom(self.geometry)
self.node.addChild(geomNode)
def ensureVdataVertex(self):
if self.vdataVertex is None:
self.vdataVertex = GeomVertexWriter(self.vdata, 'vertex')
def ensureVdataTexcoord(self):
if self.vdataTexcoord is None:
self.vdataTexcoord = GeomVertexWriter(self.vdata, 'texcoord')
def ensureVdataNormal(self):
if self.vdataNormal is None:
self.vdataNormal = GeomVertexWriter(self.vdata, 'normal')
def ensureTriangles(self):
if self.triangles is None:
self.triangles = GeomTriangles(Geom.UHDynamic)
def ensureGeometry(self):
if self.geometry is None:
self.geometry = Geom(self.vdata)
| {
"content_hash": "634528f9f680768edc0a7807dcc2ece7",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 71,
"avg_line_length": 25.423357664233578,
"alnum_prop": 0.6933677863910422,
"repo_name": "codistmonk/burdenofproof",
"id": "f9a6df951211ae5ce5bb7a674cc74d96ab917096",
"size": "3483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/generatehuman/objloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "84"
},
{
"name": "C++",
"bytes": "52026"
},
{
"name": "Python",
"bytes": "837717"
},
{
"name": "Shell",
"bytes": "672"
}
],
"symlink_target": ""
} |
import base64
import gzip
import mock
import shutil
import tempfile
from oslo_concurrency import processutils
from oslotest import base as test_base
import requests
from ironic_lib import disk_partitioner
from ironic_lib import disk_utils
from ironic_lib import exception
from ironic_lib import utils
@mock.patch.object(utils, 'execute')
class ListPartitionsTestCase(test_base.BaseTestCase):
    """Tests for disk_utils.list_partitions, which parses parted output."""

    def test_correct(self, execute_mock):
        # Machine-readable ('-m') parted output: two header lines, then one
        # colon-separated record per partition.
        output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:1.00MiB:501MiB:500MiB:ext4::boot;
2:501MiB:476940MiB:476439MiB:::;
"""
        expected = [
            {'start': 1, 'end': 501, 'size': 500,
             'filesystem': 'ext4', 'flags': 'boot'},
            {'start': 501, 'end': 476940, 'size': 476439,
             'filesystem': '', 'flags': ''},
        ]
        execute_mock.return_value = (output, '')
        result = disk_utils.list_partitions('/dev/fake')
        self.assertEqual(expected, result)
        execute_mock.assert_called_once_with(
            'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
            use_standard_locale=True)

    @mock.patch.object(disk_utils.LOG, 'warn')
    def test_incorrect(self, log_mock, execute_mock):
        # A malformed partition record is skipped with a warning, not raised.
        output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:XX1076MiB:---:524MiB:ext4::boot;
"""
        execute_mock.return_value = (output, '')
        self.assertEqual([], disk_utils.list_partitions('/dev/fake'))
        self.assertEqual(1, log_mock.call_count)
@mock.patch.object(disk_partitioner.DiskPartitioner, 'commit', lambda _: None)
class WorkOnDiskTestCase(test_base.BaseTestCase):
    """Failure-path tests for disk_utils.work_on_disk.

    Each test makes is_block_device report a missing device at a different
    point of the partitioning sequence and asserts InstanceDeployFailure.
    """

    def setUp(self):
        super(WorkOnDiskTestCase, self).setUp()
        self.image_path = '/tmp/xyz/image'
        self.root_mb = 128
        self.swap_mb = 64
        self.ephemeral_mb = 0
        self.ephemeral_format = None
        self.configdrive_mb = 0
        self.dev = '/dev/fake'
        self.swap_part = '/dev/fake-part1'
        self.root_part = '/dev/fake-part2'
        # Patch the helpers work_on_disk relies on; tests then drive them
        # through return_value / side_effect.
        self.mock_ibd = mock.patch.object(disk_utils,
                                          'is_block_device').start()
        self.mock_mp = mock.patch.object(disk_utils,
                                         'make_partitions').start()
        self.addCleanup(self.mock_ibd.stop)
        self.addCleanup(self.mock_mp.stop)
        self.mock_remlbl = mock.patch.object(disk_utils,
                                             'destroy_disk_metadata').start()
        self.addCleanup(self.mock_remlbl.stop)
        self.mock_mp.return_value = {'swap': self.swap_part,
                                     'root': self.root_part}

    def test_no_parent_device(self):
        # The device itself is absent: fail before any partitioning happens.
        self.mock_ibd.return_value = False
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils.work_on_disk, self.dev,
                          self.root_mb, self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path, False)
        self.mock_ibd.assert_called_once_with(self.dev)
        self.assertFalse(self.mock_mp.called,
                         "make_partitions mock was unexpectedly called.")

    def test_no_root_partition(self):
        # Device exists, but the root partition never appears after mkpart.
        self.mock_ibd.side_effect = [True, False]
        calls = [mock.call(self.dev),
                 mock.call(self.root_part)]
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils.work_on_disk, self.dev,
                          self.root_mb, self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path, False)
        self.assertEqual(self.mock_ibd.call_args_list, calls)
        self.mock_mp.assert_called_once_with(self.dev, self.root_mb,
                                             self.swap_mb, self.ephemeral_mb,
                                             self.configdrive_mb, commit=True)

    def test_no_swap_partition(self):
        # Root appears, swap partition does not.
        self.mock_ibd.side_effect = [True, True, False]
        calls = [mock.call(self.dev),
                 mock.call(self.root_part),
                 mock.call(self.swap_part)]
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils.work_on_disk, self.dev,
                          self.root_mb, self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path, False)
        self.assertEqual(self.mock_ibd.call_args_list, calls)
        self.mock_mp.assert_called_once_with(self.dev, self.root_mb,
                                             self.swap_mb, self.ephemeral_mb,
                                             self.configdrive_mb, commit=True)

    def test_no_ephemeral_partition(self):
        # With an ephemeral partition requested, it is checked last.
        ephemeral_part = '/dev/fake-part1'
        swap_part = '/dev/fake-part2'
        root_part = '/dev/fake-part3'
        ephemeral_mb = 256
        ephemeral_format = 'exttest'
        self.mock_mp.return_value = {'ephemeral': ephemeral_part,
                                     'swap': swap_part,
                                     'root': root_part}
        self.mock_ibd.side_effect = [True, True, True, False]
        calls = [mock.call(self.dev),
                 mock.call(root_part),
                 mock.call(swap_part),
                 mock.call(ephemeral_part)]
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils.work_on_disk, self.dev,
                          self.root_mb, self.swap_mb, ephemeral_mb,
                          ephemeral_format, self.image_path, False)
        self.assertEqual(self.mock_ibd.call_args_list, calls)
        self.mock_mp.assert_called_once_with(self.dev, self.root_mb,
                                             self.swap_mb, ephemeral_mb,
                                             self.configdrive_mb, commit=True)

    @mock.patch.object(utils, 'unlink_without_raise')
    @mock.patch.object(disk_utils, '_get_configdrive')
    def test_no_configdrive_partition(self, mock_configdrive, mock_unlink):
        # The configdrive temp file must be cleaned up even on failure.
        mock_configdrive.return_value = (10, 'fake-path')
        swap_part = '/dev/fake-part1'
        configdrive_part = '/dev/fake-part2'
        root_part = '/dev/fake-part3'
        configdrive_url = 'http://1.2.3.4/cd'
        configdrive_mb = 10
        self.mock_mp.return_value = {'swap': swap_part,
                                     'configdrive': configdrive_part,
                                     'root': root_part}
        self.mock_ibd.side_effect = [True, True, True, False]
        calls = [mock.call(self.dev),
                 mock.call(root_part),
                 mock.call(swap_part),
                 mock.call(configdrive_part)]
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils.work_on_disk, self.dev,
                          self.root_mb, self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path, 'fake-uuid',
                          preserve_ephemeral=False,
                          configdrive=configdrive_url)
        self.assertEqual(self.mock_ibd.call_args_list, calls)
        self.mock_mp.assert_called_once_with(self.dev, self.root_mb,
                                             self.swap_mb, self.ephemeral_mb,
                                             configdrive_mb, commit=True)
        mock_unlink.assert_called_once_with('fake-path')
@mock.patch.object(utils, 'execute')
class MakePartitionsTestCase(test_base.BaseTestCase):
    """Tests that make_partitions builds the expected parted command line."""

    def setUp(self):
        super(MakePartitionsTestCase, self).setUp()
        self.dev = 'fake-dev'
        self.root_mb = 1024
        self.swap_mb = 512
        self.ephemeral_mb = 0
        self.configdrive_mb = 0
        # Fixed prefix of every parted invocation; mkpart args are appended.
        self.parted_static_cmd = ['parted', '-a', 'optimal', '-s', self.dev,
                                  '--', 'unit', 'MiB', 'mklabel', 'msdos']

    def test_make_partitions(self, mock_exc):
        mock_exc.return_value = (None, None)
        disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb,
                                   self.ephemeral_mb, self.configdrive_mb)
        # Layout: swap at MiB 1..513, then root at 513..1537.
        expected_mkpart = ['mkpart', 'primary', 'linux-swap', '1', '513',
                           'mkpart', 'primary', '', '513', '1537']
        parted_cmd = self.parted_static_cmd + expected_mkpart
        parted_call = mock.call(*parted_cmd, run_as_root=True,
                                check_exit_code=[0])
        # fuser legitimately exits 1 when nothing holds the device open.
        fuser_cmd = ['fuser', 'fake-dev']
        fuser_call = mock.call(*fuser_cmd, run_as_root=True,
                               check_exit_code=[0, 1])
        mock_exc.assert_has_calls([parted_call, fuser_call])

    def test_make_partitions_with_ephemeral(self, mock_exc):
        self.ephemeral_mb = 2048
        # Layout: ephemeral first, then swap, then root.
        expected_mkpart = ['mkpart', 'primary', '', '1', '2049',
                           'mkpart', 'primary', 'linux-swap', '2049', '2561',
                           'mkpart', 'primary', '', '2561', '3585']
        cmd = self.parted_static_cmd + expected_mkpart
        mock_exc.return_value = (None, None)
        disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb,
                                   self.ephemeral_mb, self.configdrive_mb)
        parted_call = mock.call(*cmd, run_as_root=True, check_exit_code=[0])
        mock_exc.assert_has_calls(parted_call)
@mock.patch.object(disk_utils, 'get_dev_block_size')
@mock.patch.object(utils, 'execute')
class DestroyMetaDataTestCase(test_base.BaseTestCase):
    """Tests for disk_utils.destroy_disk_metadata (dd-based wipe of the
    partition table areas at the start and end of the device)."""

    def setUp(self):
        super(DestroyMetaDataTestCase, self).setUp()
        self.dev = 'fake-dev'
        self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"

    def test_destroy_disk_metadata(self, mock_exec, mock_gz):
        # Device size is 64 sectors: the tail wipe seeks to 64 - 36 = 28.
        mock_gz.return_value = 64
        expected_calls = [mock.call('dd', 'if=/dev/zero', 'of=fake-dev',
                                    'bs=512', 'count=36', run_as_root=True,
                                    check_exit_code=[0]),
                          mock.call('dd', 'if=/dev/zero', 'of=fake-dev',
                                    'bs=512', 'count=36', 'seek=28',
                                    run_as_root=True,
                                    check_exit_code=[0])]
        disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
        mock_exec.assert_has_calls(expected_calls)
        self.assertTrue(mock_gz.called)

    def test_destroy_disk_metadata_get_dev_size_fail(self, mock_exec, mock_gz):
        # The head wipe still happens before the size lookup blows up.
        mock_gz.side_effect = processutils.ProcessExecutionError
        expected_call = [mock.call('dd', 'if=/dev/zero', 'of=fake-dev',
                                   'bs=512', 'count=36', run_as_root=True,
                                   check_exit_code=[0])]
        self.assertRaises(processutils.ProcessExecutionError,
                          disk_utils.destroy_disk_metadata,
                          self.dev,
                          self.node_uuid)
        mock_exec.assert_has_calls(expected_call)

    def test_destroy_disk_metadata_dd_fail(self, mock_exec, mock_gz):
        mock_exec.side_effect = processutils.ProcessExecutionError
        expected_call = [mock.call('dd', 'if=/dev/zero', 'of=fake-dev',
                                   'bs=512', 'count=36', run_as_root=True,
                                   check_exit_code=[0])]
        self.assertRaises(processutils.ProcessExecutionError,
                          disk_utils.destroy_disk_metadata,
                          self.dev,
                          self.node_uuid)
        mock_exec.assert_has_calls(expected_call)
        # The first dd failed, so the size lookup is never reached.
        self.assertFalse(mock_gz.called)
@mock.patch.object(utils, 'execute')
class GetDeviceBlockSizeTestCase(test_base.BaseTestCase):
    """Tests for disk_utils.get_dev_block_size (blockdev wrapper)."""

    def setUp(self):
        super(GetDeviceBlockSizeTestCase, self).setUp()
        self.dev = 'fake-dev'
        self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"

    def test_get_dev_block_size(self, mock_exec):
        mock_exec.return_value = ("64", "")
        # 'blockdev --getsz' reports the device size in 512-byte sectors.
        expected_call = [mock.call('blockdev', '--getsz', self.dev,
                                   run_as_root=True, check_exit_code=[0])]
        disk_utils.get_dev_block_size(self.dev)
        mock_exec.assert_has_calls(expected_call)
@mock.patch.object(disk_utils, 'dd')
@mock.patch.object(disk_utils, 'qemu_img_info')
@mock.patch.object(disk_utils, 'convert_image')
class PopulateImageTestCase(test_base.BaseTestCase):
    """populate_image: raw images are dd'd straight to the device, any
    other format is converted to raw via convert_image."""

    def setUp(self):
        super(PopulateImageTestCase, self).setUp()

    def test_populate_raw_image(self, mock_cg, mock_qinfo, mock_dd):
        type(mock_qinfo.return_value).file_format = mock.PropertyMock(
            return_value='raw')
        disk_utils.populate_image('src', 'dst')
        mock_dd.assert_called_once_with('src', 'dst')
        self.assertFalse(mock_cg.called)

    def test_populate_qcow2_image(self, mock_cg, mock_qinfo, mock_dd):
        type(mock_qinfo.return_value).file_format = mock.PropertyMock(
            return_value='qcow2')
        disk_utils.populate_image('src', 'dst')
        mock_cg.assert_called_once_with('src', 'dst', 'raw', True)
        self.assertFalse(mock_dd.called)
@mock.patch.object(disk_utils, 'is_block_device', lambda d: True)
@mock.patch.object(disk_utils, 'block_uuid', lambda p: 'uuid')
@mock.patch.object(disk_utils, 'dd', lambda *_: None)
@mock.patch.object(disk_utils, 'convert_image', lambda *_: None)
@mock.patch.object(utils, 'mkfs', lambda *_: None)
# NOTE(dtantsur): destroy_disk_metadata resets file size, disabling it
@mock.patch.object(disk_utils, 'destroy_disk_metadata', lambda *_: None)
class RealFilePartitioningTestCase(test_base.BaseTestCase):
    """This test applies some real-world partitioning scenario to a file.

    This test covers the whole partitioning, mocking everything not possible
    on a file. That helps us assure, that we do all partitioning math properly
    and also conducts integration testing of DiskPartitioner.
    """

    def setUp(self):
        super(RealFilePartitioningTestCase, self).setUp()
        # NOTE(dtantsur): no parted utility on gate-ironic-python26
        try:
            utils.execute('parted', '--version')
        except OSError as exc:
            self.skipTest('parted utility was not found: %s' % exc)
        self.file = tempfile.NamedTemporaryFile(delete=False)
        # NOTE(ifarkas): the file needs to be closed, so fuser won't report
        # any usage
        self.file.close()
        # NOTE(dtantsur): 20 MiB file with zeros
        utils.execute('dd', 'if=/dev/zero', 'of=%s' % self.file.name,
                      'bs=1', 'count=0', 'seek=20MiB')

    @staticmethod
    def _run_without_root(func, *args, **kwargs):
        """Make sure root is not required when using utils.execute."""
        real_execute = utils.execute

        def fake_execute(*cmd, **kwargs):
            kwargs['run_as_root'] = False
            return real_execute(*cmd, **kwargs)

        with mock.patch.object(utils, 'execute', fake_execute):
            return func(*args, **kwargs)

    def test_different_sizes(self):
        # NOTE(dtantsur): Keep this list in order with expected partitioning
        fields = ['ephemeral_mb', 'swap_mb', 'root_mb']
        variants = ((0, 0, 12), (4, 2, 8), (0, 4, 10), (5, 0, 10))
        for variant in variants:
            kwargs = dict(zip(fields, variant))
            self._run_without_root(disk_utils.work_on_disk,
                                   self.file.name, ephemeral_format='ext4',
                                   node_uuid='', image_path='path', **kwargs)
            part_table = self._run_without_root(
                disk_utils.list_partitions, self.file.name)
            # Zero-size partitions are never created; filter(None, ...)
            # drops them from the expectation.
            for part, expected_size in zip(part_table, filter(None, variant)):
                self.assertEqual(expected_size, part['size'],
                                 "comparison failed for %s" % list(variant))

    def test_whole_disk(self):
        # 6 MiB ephemeral + 3 MiB swap + 9 MiB root + 1 MiB for MBR
        # + 1 MiB MAGIC == 20 MiB whole disk
        # TODO(dtantsur): figure out why we need 'magic' 1 more MiB
        # and why the is different on Ubuntu and Fedora (see below)
        self._run_without_root(disk_utils.work_on_disk, self.file.name,
                               root_mb=9, ephemeral_mb=6, swap_mb=3,
                               ephemeral_format='ext4', node_uuid='',
                               image_path='path')
        part_table = self._run_without_root(
            disk_utils.list_partitions, self.file.name)
        sizes = [part['size'] for part in part_table]
        # NOTE(dtantsur): parted in Ubuntu 12.04 will occupy the last MiB,
        # parted in Fedora 20 won't - thus two possible variants for last part
        self.assertEqual([6, 3], sizes[:2],
                         "unexpected partitioning %s" % part_table)
        self.assertIn(sizes[2], (9, 10))
@mock.patch.object(shutil, 'copyfileobj')
@mock.patch.object(requests, 'get')
class GetConfigdriveTestCase(test_base.BaseTestCase):
    """Tests for disk_utils._get_configdrive, which accepts either a URL
    or a gzipped+base64 string and materialises it as a local file."""

    @mock.patch.object(gzip, 'GzipFile')
    def test_get_configdrive(self, mock_gzip, mock_requests, mock_copy):
        # 'Zm9vYmFy' is base64 for 'foobar'.
        mock_requests.return_value = mock.MagicMock(content='Zm9vYmFy')
        disk_utils._get_configdrive('http://1.2.3.4/cd',
                                    'fake-node-uuid')
        mock_requests.assert_called_once_with('http://1.2.3.4/cd')
        mock_gzip.assert_called_once_with('configdrive', 'rb',
                                          fileobj=mock.ANY)
        mock_copy.assert_called_once_with(mock.ANY, mock.ANY)

    @mock.patch.object(gzip, 'GzipFile')
    def test_get_configdrive_base64_string(self, mock_gzip, mock_requests,
                                           mock_copy):
        disk_utils._get_configdrive('Zm9vYmFy', 'fake-node-uuid')
        # Not a URL, so no HTTP request should be made.
        self.assertFalse(mock_requests.called)
        mock_gzip.assert_called_once_with('configdrive', 'rb',
                                          fileobj=mock.ANY)
        mock_copy.assert_called_once_with(mock.ANY, mock.ANY)

    def test_get_configdrive_bad_url(self, mock_requests, mock_copy):
        mock_requests.side_effect = requests.exceptions.RequestException
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils._get_configdrive,
                          'http://1.2.3.4/cd', 'fake-node-uuid')
        self.assertFalse(mock_copy.called)

    @mock.patch.object(base64, 'b64decode')
    def test_get_configdrive_base64_error(self, mock_b64, mock_requests,
                                          mock_copy):
        # Malformed base64 is translated into InstanceDeployFailure.
        mock_b64.side_effect = TypeError
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils._get_configdrive,
                          'malformed', 'fake-node-uuid')
        mock_b64.assert_called_once_with('malformed')
        self.assertFalse(mock_copy.called)

    @mock.patch.object(gzip, 'GzipFile')
    def test_get_configdrive_gzip_error(self, mock_gzip, mock_requests,
                                        mock_copy):
        # An I/O failure while decompressing also surfaces as deploy failure.
        mock_requests.return_value = mock.MagicMock(content='Zm9vYmFy')
        mock_copy.side_effect = IOError
        self.assertRaises(exception.InstanceDeployFailure,
                          disk_utils._get_configdrive,
                          'http://1.2.3.4/cd', 'fake-node-uuid')
        mock_requests.assert_called_once_with('http://1.2.3.4/cd')
        mock_gzip.assert_called_once_with('configdrive', 'rb',
                                          fileobj=mock.ANY)
        mock_copy.assert_called_once_with(mock.ANY, mock.ANY)
| {
"content_hash": "938093718c9ccb314cfa61fe6e9c69ee",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 79,
"avg_line_length": 45.89252336448598,
"alnum_prop": 0.568272070053966,
"repo_name": "faizan-barmawer/elytics",
"id": "e1c8db8986fea4e7dac85d4f68f580950151e143",
"size": "20271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ironic_lib/test_disk_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "578"
},
{
"name": "Python",
"bytes": "82967"
}
],
"symlink_target": ""
} |
import ps_list
import ps_form
#.apidoc title: Printscreen Support
""" A special report, that is automatically formatted to look like the
screen contents of Form/List Views.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "616867b222c6757391f7781098d1e200",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.7609561752988048,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "e940a7e467c6ea63ccca8846621722acadf02c88",
"size": "1239",
"binary": false,
"copies": "76",
"ref": "refs/heads/master",
"path": "openerp/report/printscreen/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
import datetime
import getpass
import glob
import logging
import multiprocessing
import os
import platform
import subprocess
import sys
import tempfile
import time
# Reconsider before importing Buck modules
# Timeout in sec for filesystem-related system calls
_FS_TIMEOUT_S = 10
def _te(q, f, args):
try:
ret = f(*args)
q.put(ret)
except Exception as e:
q.put(e)
def timed_exec(func, args=(), timeout_s=None):
    """Run ``func(*args)`` in a subprocess, waiting at most ``timeout_s``.

    Returns the function's result, re-raises any exception the function
    raised, and raises TimeoutError when the subprocess did not finish
    cleanly in time.
    """
    result_q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_te, args=(result_q, func, args))
    worker.start()
    worker.join(timeout_s)
    # Kill stragglers; a no-op if the worker already exited.
    worker.terminate()
    if worker.exitcode != 0:
        raise TimeoutError
    outcome = result_q.get_nowait()
    if isinstance(outcome, Exception):
        raise outcome
    return outcome
def fs_safe_getcwd():
    """Return ``os.getcwd()`` guarded by a timeout; None on any failure."""
    try:
        return timed_exec(os.getcwd, timeout_s=_FS_TIMEOUT_S)
    except FileNotFoundError:
        logging.error("os.getcwd failed: CWD disappeared")
    except TimeoutError:
        logging.error("os.getcwd failed: timed out")
    return None
def fs_safe_path_exists(path):
    """``os.path.exists`` with a timeout guard; None when the check hung."""
    try:
        return timed_exec(os.path.exists, (path,), _FS_TIMEOUT_S)
    except TimeoutError:
        logging.error("os.path.exists failed: timed out")
        return None
def _fsw(path, data):
with open(path, "w") as f:
return f.write(data)
def fs_safe_write(path, data):
    """Timeout-guarded file write; returns chars written, or -1 on failure."""
    try:
        return timed_exec(_fsw, (path, data), _FS_TIMEOUT_S)
    except TimeoutError:
        logging.error("write failed: timed out")
    except OSError:
        logging.error("write failed: I/O error")
    return -1
def fs_safe_makedirs(name, mode=0o777, exist_ok=False):
    """Timeout-guarded ``os.makedirs``; True on success, False otherwise."""
    try:
        timed_exec(os.makedirs, (name, mode, exist_ok), _FS_TIMEOUT_S)
    except FileNotFoundError:
        logging.error("os.makedirs failed: CWD disappeared")
        return False
    except TimeoutError:
        logging.error("os.makedirs failed: timed out")
        return False
    return True
def get_project_root_abspath(key_file=".buckconfig"):
    """
    Return abs path to project root as string, or None if not found.

    Walks upward from the current directory until a directory containing
    *key_file* is found, or the filesystem root is reached.
    """
    current = fs_safe_getcwd()
    if current is None:
        return None
    while True:
        found = fs_safe_path_exists(os.path.join(current, key_file))
        if found is None:
            # Filesystem is unhealthy. Quit searching
            return None
        if found:
            return current
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without a match.
            return None
        current = parent
def time_prefix():
    """Return the current local time formatted as 'yyyy-mm-dd.hh-mm-ss.us'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d.%H-%M-%S.%f")
def _setup_logging():
    """
    Equivalent of programs.buck_logging.setup_logging.
    Should be called only when buck_sosreport.py is directly invoked.
    """
    requested = os.environ.get("BUCK_WRAPPER_LOG_LEVEL", "INFO")
    name_to_level = {
        "CRITICAL": logging.CRITICAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
        "NOTSET": logging.NOTSET,
    }
    # Unknown level names silently fall back to INFO.
    level = name_to_level.get(requested.upper(), logging.INFO)
    if level == logging.INFO:
        # Terse output for the default level; verbose context otherwise.
        fmt = "%(message)s"
    else:
        fmt = (
            "%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s"
        )
    logging.basicConfig(level=level, format=fmt)
class SOSReportExitCode:
    """
    Equivalent of programs.buck_tool.ExitCode
    """

    SUCCESS = 0
    COMMANDLINE_ERROR = 3
    FATAL_GENERIC = 10
    FATAL_BOOTSTRAP = 11
    FATAL_IO = 13
    FATAL_DISK_FULL = 14
    FIX_FAILED = 16
    # 128 + signal number, following the shell convention.
    SIGNAL_INTERRUPT = 130  # 128 + SIGINT(2)
    SIGNAL_PIPE = 141  # 128 + SIGPIPE(13)
class SOSReport:
    def __init__(self, absroot=None):
        """
        :param absroot: absolute project root path; autodetected from the
            current directory (via .buckconfig lookup) when None.
        """
        self.absroot = absroot or get_project_root_abspath()
        if self.absroot is not None:
            self.buckd_dir = os.path.join(self.absroot, ".buckd")
            self.buckd_logdir = os.path.join(self.absroot, "buck-out", "log")
            # Per-invocation output directory, timestamped to avoid clashes.
            # NOTE(review): when absroot is None, buckd_dir/buckd_logdir/
            # sos_outdir are never set -- later access would raise
            # AttributeError; confirm callers check absroot first.
            self.sos_outdir = os.path.join(
                self.buckd_logdir, "%s_sosreport" % time_prefix()
            )
        self.pfname = platform.system()
def _get_buckd_pids(self):
"""
Return PIDs of current project's buckd, or [] on failure.
"""
pids = []
key_clsname = "com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper"
key_buckdcfg = "-Dbuck.buckd_dir=%s" % self.buckd_dir
try:
res = subprocess.run(["ps", "auxww"], capture_output=True)
if res.returncode == 0:
for line in res.stdout.splitlines():
# make a list of ["user", "pid", "rest..."]
lline = line.strip().split(maxsplit=2)
if len(lline) < 3:
logging.warning(
"_get_buckd_pids found an irregular line of ps. Skipping: %s",
line.decode(errors="replace"),
)
continue
cmdargs = lline[-1].strip().split()
if (
key_clsname.encode() in cmdargs
and key_buckdcfg.encode() in cmdargs
):
try:
pid = int(lline[1])
pids.append(pid)
except ValueError as e:
logging.warning(
"_get_buckd_pids found an irregular line of ps (%s): %s",
line.decode(errors="replace"),
repr(e),
)
else:
logging.warning(
"ps command failed: %s", repr(res.stderr.decode(errors="replace"))
)
except FileNotFoundError as e:
logging.error("_get_buckd_pids failed to get Buckd PIDs: %s", repr(e))
return pids
def _get_jstack_once(self, pids, ntry=0):
for pid in pids:
filename = os.path.join(
self.sos_outdir, "%s_jstack_%d_%d" % (time_prefix(), pid, ntry)
)
try:
res = subprocess.run(["jstack", "-l", str(pid)], capture_output=True)
if res.returncode == 0:
written = fs_safe_write(
filename, res.stdout.decode(errors="replace")
)
if written < 0:
logging.error("Failed to log %s", filename)
else:
logging.warning(
"jstack command failed: %s",
repr(res.stderr.decode(errors="replace")),
)
except FileNotFoundError as e:
logging.error("_get_jstack_once failed to get jstack: %s", repr(e))
def _get_jstack(self, count=10, interval_s=5):
for n in range(count):
logging.info(
"Getting jstack (%d/%d) with interval %d sec", n + 1, count, interval_s
)
self._get_jstack_once(self._get_buckd_pids(), n)
time.sleep(interval_s)
def _cmd_dump_common(self, cmd):
"""
Run cmd (e.g. ["echo", "hi"]) and dump its stdout/stderr on a file.
Be sure not to run filesystem-related command such as 'ls'. Instead,
define a fs-safe function (see fs_safe_* for example) for the operation.
"""
filename = os.path.join(
self.sos_outdir, "%s_%s" % (time_prefix(), "_".join(cmd))
)
logging.info("Getting '%s'", " ".join(cmd))
try:
res = subprocess.run(cmd, capture_output=True)
out = (
"stderr:\n"
+ res.stderr.decode(errors="replace")
+ "\n\nstdout:\n"
+ res.stdout.decode(errors="replace")
)
written = fs_safe_write(filename, out)
if written < 0:
logging.error("Failed to log %s", filename)
if res.returncode != 0:
logging.warning(
"'%s' finished with error/warning: %s",
" ".join(cmd),
repr(res.stderr.decode(errors="replace")),
)
except FileNotFoundError as e:
logging.error("'%s' failed: %s", " ".join(cmd), repr(e))
def _get_df(self):
self._cmd_dump_common(["df", "-h"])
def _get_mount(self):
self._cmd_dump_common(["mount"])
def _get_ifconfig(self):
self._cmd_dump_common(["ifconfig", "-a"])
def _get_uptime(self):
self._cmd_dump_common(["uptime"])
def _get_ps(self):
self._cmd_dump_common(["ps", "auxww"])
if self.pfname == "Linux":
self._cmd_dump_common(["ps", "auxww", "-L"])
elif self.pfname == "Darwin":
self._cmd_dump_common(["ps", "auxww", "-M"])
def _get_netstat(self):
if self.pfname == "Linux":
self._cmd_dump_common(["netstat", "-nap"])
elif self.pfname == "Darwin":
self._cmd_dump_common(["netstat", "-nav"])
def _get_top(self):
if self.pfname == "Linux":
self._cmd_dump_common(["top", "-b", "-n", "1"])
elif self.pfname == "Darwin":
self._cmd_dump_common(["top", "-l", "1"])
def _get_lsof(self):
cmd = ["lsof"]
try:
uid = getpass.getuser()
cmd.extend(["-u", uid])
except KeyError:
pass
self._cmd_dump_common(cmd)
def _get_daemon_status(self):
self._cmd_dump_common(["edenfsctl", "status"])
self._cmd_dump_common(["watchman", "debug-status"])
def _get_pstree(self):
if self.pfname == "Linux":
self._cmd_dump_common(["pstree", "-a", "-A", "-g", "-l", "-p", "-t", "-u"])
elif self.pfname == "Darwin":
# get away with PPID of 'ps -ef'
self._cmd_dump_common(["ps", "-ef"])
def _get_varlogs(self):
filename = os.path.join(
self.sos_outdir, "%s_%s" % (time_prefix(), "varlogs.tar.gz")
)
logging.info("Getting varlogs")
logfiles = []
if self.pfname == "Linux":
logfiles = glob.glob("/var/log/messages*") + glob.glob("/var/log/syslog*")
elif self.pfname == "Darwin":
logfiles = glob.glob("/var/log/system.log*")
if logfiles:
try:
res = subprocess.run(
["tar", "zcvf", filename] + logfiles, capture_output=True
)
if res.returncode != 0:
logging.warning(
"tar command failed: %s",
repr(res.stderr.decode(errors="replace")),
)
except FileNotFoundError as e:
logging.error("_get_varlogs failed: %s", repr(e))
else:
logging.warning("no logfile found to report")
def gen_report(self):
"""
Generate SOS report under log directory.
Return SOSReportexitcode and message as tupple.
"""
if self.absroot is None:
msg = "Failed to identify Buck project"
logging.error(msg)
return SOSReportExitCode.COMMANDLINE_ERROR, msg
if os.name == "nt":
# TODO: windows platform version
msg = "sosreport is not implemented on: %s" % os.name
logging.error(msg)
return SOSReportExitCode.COMMANDLINE_ERROR, msg
makedirs = fs_safe_makedirs(self.sos_outdir, exist_ok=True)
if not makedirs:
tmpdir = os.path.join(tempfile.gettempdir(), "%s_sosreport" % time_prefix())
makedirs = fs_safe_makedirs(tmpdir, exist_ok=True)
if not makedirs:
msg = "Failed to find a location to store reports"
logging.error(msg)
return SOSReportExitCode.FATAL_BOOTSTRAP, msg
logging.warning(
"Failed to locate %s. Falling back to tempdir %s to store reports",
self.sos_outdir,
tmpdir,
)
self.sos_outdir = tmpdir
logging.info("Generating report under %s", self.sos_outdir)
self._get_jstack()
self._get_df()
self._get_mount()
self._get_ps()
self._get_netstat()
self._get_lsof()
self._get_uptime()
self._get_top()
self._get_ifconfig()
self._get_daemon_status()
self._get_pstree()
self._get_varlogs()
# TODO: other metrics
return SOSReportExitCode.SUCCESS, "success"
def sosreport_main(proj_root=None):
    """Generate an SOS report for *proj_root* (auto-detected when None).

    Returns a SOSReportExitCode value.
    """
    logging.info("Running sosreport..")
    report = SOSReport(proj_root)
    if report.absroot is None:
        msg = "Failed to identify Buck project"
        logging.error(msg)
        retcode = SOSReportExitCode.FATAL_BOOTSTRAP
    else:
        logging.info("Making report on the following project: %s", report.absroot)
        retcode, msg = report.gen_report()
    logging.info('sosreport finished with msg: "%s" and retcode: %d', msg, retcode)
    return retcode
if __name__ == "__main__":
    _setup_logging()
    # NOTE(review): the project root is taken from argv[2], not argv[1] --
    # presumably argv[1] is reserved by whatever wrapper invokes this
    # script; confirm against the caller before changing.
    if len(sys.argv) > 2:
        # assume sys.argv[2] is "/full/path/to/project_root"
        sys.exit(sosreport_main(sys.argv[2]))
    else:
        sys.exit(sosreport_main())
| {
"content_hash": "7cb80f8226beb43737ffdb6944656ae9",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 90,
"avg_line_length": 32.8732057416268,
"alnum_prop": 0.5297285495960993,
"repo_name": "JoelMarcey/buck",
"id": "ffd74909d1c497d0b6d04d18e1ec2e78fa602f64",
"size": "14339",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "programs/buck_sosreport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "579"
},
{
"name": "Batchfile",
"bytes": "2093"
},
{
"name": "C",
"bytes": "255521"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "10992"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Go",
"bytes": "16819"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "6115"
},
{
"name": "Haskell",
"bytes": "895"
},
{
"name": "IDL",
"bytes": "385"
},
{
"name": "Java",
"bytes": "19430296"
},
{
"name": "JavaScript",
"bytes": "932672"
},
{
"name": "Kotlin",
"bytes": "2079"
},
{
"name": "Lex",
"bytes": "2731"
},
{
"name": "Makefile",
"bytes": "1816"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4384"
},
{
"name": "Objective-C",
"bytes": "138150"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "244"
},
{
"name": "Prolog",
"bytes": "858"
},
{
"name": "Python",
"bytes": "1786899"
},
{
"name": "Roff",
"bytes": "1109"
},
{
"name": "Rust",
"bytes": "3618"
},
{
"name": "Scala",
"bytes": "4906"
},
{
"name": "Shell",
"bytes": "49876"
},
{
"name": "Smalltalk",
"bytes": "3355"
},
{
"name": "Standard ML",
"bytes": "15"
},
{
"name": "Swift",
"bytes": "6897"
},
{
"name": "Thrift",
"bytes": "26256"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5)."""
from dataclasses import dataclass
import logging
from miio import AirQualityMonitor # pylint: disable=import-error
from miio import DeviceException
from miio.gateway import (
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GATEWAY_MODEL_EU,
GatewayException,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .const import CONF_FLOW_TYPE, CONF_GATEWAY, DOMAIN, KEY_COORDINATOR
from .gateway import XiaomiGatewayDevice
_LOGGER = logging.getLogger(__name__)
# Defaults used when the platform is configured via YAML.
DEFAULT_NAME = "Xiaomi Miio Sensor"
DATA_KEY = "sensor.xiaomi_miio"
# YAML schema: the miio token is always exactly 32 characters long.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
# Keys of the extra state attributes exposed by the air quality monitor.
ATTR_POWER = "power"
ATTR_CHARGING = "charging"
ATTR_BATTERY_LEVEL = "battery_level"
ATTR_DISPLAY_CLOCK = "display_clock"
ATTR_NIGHT_MODE = "night_mode"
ATTR_NIGHT_TIME_BEGIN = "night_time_begin"
ATTR_NIGHT_TIME_END = "night_time_end"
ATTR_SENSOR_STATE = "sensor_state"
ATTR_MODEL = "model"
SUCCESS = ["ok"]
@dataclass
class SensorType:
    """Class that holds device specific info for a xiaomi aqara sensor."""

    # NOTE(review): the fields default to None despite the plain ``str``
    # annotations -- Optional[str] would be more precise; confirm with a
    # type checker before tightening.
    unit: str = None  # unit of measurement reported to Home Assistant
    icon: str = None  # frontend icon override, if any
    device_class: str = None  # Home Assistant device class
# Maps a gateway sub-device status key to its sensor metadata.
GATEWAY_SENSOR_TYPES = {
    "temperature": SensorType(
        unit=TEMP_CELSIUS, icon=None, device_class=DEVICE_CLASS_TEMPERATURE
    ),
    "humidity": SensorType(
        unit=PERCENTAGE, icon=None, device_class=DEVICE_CLASS_HUMIDITY
    ),
    "pressure": SensorType(
        unit=PRESSURE_HPA, icon=None, device_class=DEVICE_CLASS_PRESSURE
    ),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Xiaomi sensor from a config entry."""
    entities = []
    if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
        entry_data = hass.data[DOMAIN][config_entry.entry_id]
        gateway = entry_data[CONF_GATEWAY]
        # These gateway models have no built-in illuminance sensor.
        models_without_illuminance = (
            GATEWAY_MODEL_AC_V1,
            GATEWAY_MODEL_AC_V2,
            GATEWAY_MODEL_AC_V3,
            GATEWAY_MODEL_EU,
        )
        if gateway.model not in models_without_illuminance:
            entities.append(
                XiaomiGatewayIlluminanceSensor(
                    gateway, config_entry.title, config_entry.unique_id
                )
            )
        # One entity per supported status variable of every sub device.
        coordinator = entry_data[KEY_COORDINATOR]
        for sub_device in gateway.devices.values():
            for variable in set(sub_device.status) & set(GATEWAY_SENSOR_TYPES):
                entities.append(
                    XiaomiGatewaySensor(coordinator, sub_device, config_entry, variable)
                )
    async_add_entities(entities, update_before_add=True)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the sensor from config."""
    hass.data.setdefault(DATA_KEY, {})
    host = config[CONF_HOST]
    token = config[CONF_TOKEN]
    name = config[CONF_NAME]
    # Only the token prefix is logged, keeping the credential out of logs.
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    try:
        air_quality_monitor = AirQualityMonitor(host, token)
        device_info = await hass.async_add_executor_job(air_quality_monitor.info)
        _LOGGER.info(
            "%s %s %s detected",
            device_info.model,
            device_info.firmware_version,
            device_info.hardware_version,
        )
        device = XiaomiAirQualityMonitor(
            name,
            air_quality_monitor,
            device_info.model,
            f"{device_info.model}-{device_info.mac_address}",
        )
    except DeviceException as ex:
        # Device unreachable at startup -- let HA retry the platform later.
        raise PlatformNotReady from ex
    hass.data[DATA_KEY][host] = device
    async_add_entities([device], update_before_add=True)
class XiaomiAirQualityMonitor(Entity):
    """Representation of a Xiaomi Air Quality Monitor.

    Polls a miio AirQualityMonitor device and exposes its AQI reading as
    the entity state, with power/battery/display details as attributes.
    """

    def __init__(self, name, device, model, unique_id):
        """Initialize the entity."""
        self._name = name
        self._device = device  # miio AirQualityMonitor handle used for polling
        self._model = model
        self._unique_id = unique_id
        self._icon = "mdi:cloud"
        self._unit_of_measurement = "AQI"
        self._available = None  # unknown until the first poll completes
        self._state = None
        # Attribute values stay None until the first successful update.
        self._state_attrs = {
            ATTR_POWER: None,
            ATTR_BATTERY_LEVEL: None,
            ATTR_CHARGING: None,
            ATTR_DISPLAY_CLOCK: None,
            ATTR_NIGHT_MODE: None,
            ATTR_NIGHT_TIME_BEGIN: None,
            ATTR_NIGHT_TIME_END: None,
            ATTR_SENSOR_STATE: None,
            ATTR_MODEL: self._model,
        }
    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._unique_id
    @property
    def name(self):
        """Return the name of this entity, if any."""
        return self._name
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
    @property
    def icon(self):
        """Return the icon to use for device if any."""
        return self._icon
    @property
    def available(self):
        """Return true when state is known."""
        return self._available
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._state_attrs
    async def async_update(self):
        """Fetch state from the miio device."""
        try:
            # miio I/O is blocking, so it runs in the executor thread pool.
            state = await self.hass.async_add_executor_job(self._device.status)
            _LOGGER.debug("Got new state: %s", state)
            self._available = True
            self._state = state.aqi
            self._state_attrs.update(
                {
                    ATTR_POWER: state.power,
                    ATTR_CHARGING: state.usb_power,
                    ATTR_BATTERY_LEVEL: state.battery,
                    ATTR_DISPLAY_CLOCK: state.display_clock,
                    ATTR_NIGHT_MODE: state.night_mode,
                    ATTR_NIGHT_TIME_BEGIN: state.night_time_begin,
                    ATTR_NIGHT_TIME_END: state.night_time_end,
                    ATTR_SENSOR_STATE: state.sensor_state,
                }
            )
        except DeviceException as ex:
            # Log only on the transition to unavailable to avoid log spam.
            if self._available:
                self._available = False
                _LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiGatewaySensor(XiaomiGatewayDevice):
    """One sensor entity for a single status key of a gateway sub device."""

    def __init__(self, coordinator, sub_device, entry, data_key):
        """Initialize the XiaomiSensor."""
        super().__init__(coordinator, sub_device, entry)
        self._data_key = data_key
        sid = sub_device.sid
        self._unique_id = f"{sid}-{data_key}"
        self._name = f"{data_key} ({sid})".capitalize()

    @property
    def _sensor_type(self):
        """Static metadata (unit/icon/class) for this sensor's status key."""
        return GATEWAY_SENSOR_TYPES[self._data_key]

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return self._sensor_type.icon

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._sensor_type.unit

    @property
    def device_class(self):
        """Return the device class of this entity."""
        return self._sensor_type.device_class

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._sub_device.status[self._data_key]
class XiaomiGatewayIlluminanceSensor(Entity):
    """Illuminance sensor built into the gateway device itself."""

    def __init__(self, gateway_device, gateway_name, gateway_device_id):
        """Initialize the entity."""
        self._gateway = gateway_device
        self._gateway_device_id = gateway_device_id
        self._unique_id = f"{gateway_device_id}-illuminance"
        self._name = f"{gateway_name} Illuminance"
        self._state = None
        self._available = False

    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of this entity, if any."""
        return self._name

    @property
    def device_info(self):
        """Return the device info of the gateway."""
        return {"identifiers": {(DOMAIN, self._gateway_device_id)}}

    @property
    def device_class(self):
        """Return the device class of this entity."""
        return DEVICE_CLASS_ILLUMINANCE

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return LIGHT_LUX

    @property
    def available(self):
        """Return true when state is known."""
        return self._available

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    async def async_update(self):
        """Fetch state from the device."""
        try:
            # Blocking gateway I/O runs in the executor thread pool.
            reading = await self.hass.async_add_executor_job(
                self._gateway.get_illumination
            )
        except GatewayException as ex:
            # Log only on the transition to unavailable to avoid log spam.
            if self._available:
                self._available = False
                _LOGGER.error(
                    "Got exception while fetching the gateway illuminance state: %s", ex
                )
        else:
            self._state = reading
            self._available = True
| {
"content_hash": "93091cf0d90e4bdbc582458f737db4cc",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 88,
"avg_line_length": 30.975683890577507,
"alnum_prop": 0.602983024237072,
"repo_name": "turbokongen/home-assistant",
"id": "821fe164ea97f56269d63660cbe70fddc13cb2cd",
"size": "10191",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/xiaomi_miio/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ServiceLinkerManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ServiceLinkerManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :keyword api_version: Api Version. Default value is "2022-05-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        **kwargs: Any
    ) -> None:
        super(ServiceLinkerManagementClientConfiguration, self).__init__(**kwargs)
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        self.credential = credential
        self.api_version = kwargs.pop('api_version', "2022-05-01")  # type: str
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-servicelinker/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Honour any caller-supplied policy; otherwise build the default one.
        # Order matters only for readability -- each policy is independent.
        policy_factories = (
            ('user_agent_policy', policies.UserAgentPolicy),
            ('headers_policy', policies.HeadersPolicy),
            ('proxy_policy', policies.ProxyPolicy),
            ('logging_policy', policies.NetworkTraceLoggingPolicy),
            ('http_logging_policy', ARMHttpLoggingPolicy),
            ('retry_policy', policies.RetryPolicy),
            ('custom_hook_policy', policies.CustomHookPolicy),
            ('redirect_policy', policies.RedirectPolicy),
        )
        for attr_name, factory in policy_factories:
            setattr(self, attr_name, kwargs.get(attr_name) or factory(**kwargs))
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| {
"content_hash": "fbcdb194672ba3a36f6399303f621b8b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 125,
"avg_line_length": 47.59322033898305,
"alnum_prop": 0.7104700854700855,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3001ed6a9b0d83bd9419817cb047b83aeb7dda61",
"size": "3276",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicelinker/azure-mgmt-servicelinker/azure/mgmt/servicelinker/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import six
import webob
from nova.api.openstack.compute import server_metadata \
as server_metadata_v21
from nova.compute import vm_states
import nova.db.api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
# Module-level oslo.config handle shared by the stub helpers below.
CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
    """Stub for db.instance_metadata_update that reports a full quota."""
    return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata, delete):
    """Stub for db.instance_metadata_update returning the fixture metadata."""
    return stub_server_metadata()
def fake_instance_save(inst, **kwargs):
    """Stub for Instance.save that fakes a metadata write without the DB."""
    inst.metadata = stub_server_metadata()
    inst.obj_reset_changes()
def return_server_metadata(context, server_id):
    """Stub for db.instance_metadata_get; insists on a uuid-shaped id."""
    looks_like_uuid = (
        isinstance(server_id, six.string_types) and len(server_id) == 36
    )
    if not looks_like_uuid:
        raise Exception(
            'id %s must be a uuid in return server metadata' % server_id)
    return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
    """Stub for db.instance_metadata_get that reports no metadata at all."""
    return dict()
def delete_server_metadata(context, server_id, key):
    """Stub for db.instance_metadata_delete; deleting always succeeds."""
    return None
def stub_server_metadata():
    """Canonical three-key metadata fixture shared by these tests."""
    return dict(key1="value1", key2="value2", key3="value3")
def stub_max_server_metadata():
    """Fixture that fills the whole quota (CONF.quota.metadata_items keys)."""
    filler = {
        "key%i" % num: "blah" for num in range(CONF.quota.metadata_items)
    }
    return {"metadata": filler}
def return_server(context, server_id, columns_to_join=None):
    """Stub for db.instance_get returning a minimal ACTIVE fake instance."""
    fields = {
        'id': server_id,
        'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
        'name': 'fake',
        'locked': False,
        'launched_at': timeutils.utcnow(),
        'vm_state': vm_states.ACTIVE,
    }
    return fake_instance.fake_db_instance(**fields)
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None, use_slave=False):
    """Stub for db.instance_get_by_uuid; includes the fixture metadata."""
    fields = {
        'id': 1,
        'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
        'name': 'fake',
        'locked': False,
        'launched_at': timeutils.utcnow(),
        'metadata': stub_server_metadata(),
        'vm_state': vm_states.ACTIVE,
    }
    return fake_instance.fake_db_instance(**fields)
def return_server_nonexistent(context, server_id,
                              columns_to_join=None, use_slave=False):
    """Stub that simulates looking up a server that does not exist."""
    raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
    """Stub for ComputeAPI.change_instance_metadata; skips the RPC call."""
    pass
class ServerMetaDataTestV21(test.TestCase):
validation_ex = exception.ValidationError
validation_ex_large = validation_ex
def setUp(self):
super(ServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self)
self.stub_out('nova.db.api.instance_get', return_server)
self.stub_out('nova.db.api.instance_get_by_uuid',
return_server_by_uuid)
self.stub_out('nova.db.api.instance_metadata_get',
return_server_metadata)
self.stub_out(
'nova.compute.rpcapi.ComputeAPI.change_instance_metadata',
fake_change_instance_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = uuids.fake
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV21.blank(self.url + param_url)
def test_index(self):
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_nonexistent)
req = self._get_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = self._get_request('/key2')
res_dict = self.controller.show(req, self.uuid, 'key2')
expected = {"meta": {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_nonexistent)
req = self._get_request('/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_metadata)
self.stub_out('nova.db.api.instance_metadata_delete',
delete_server_metadata)
req = self._get_request('/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
self.stub_out('nova.db.api.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key6')
def test_create(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
res_dict = self.controller.create(req, self.uuid, body=body)
body['metadata'].update({
"key1": "value1",
"key2": "value2",
"key3": "value3",
})
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=None)
def test_create_item_empty_key(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_non_dict(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_key_too_long(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.create,
req, self.uuid, body=body)
def test_create_malformed_container(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_malformed_data(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"metadata": ['asdf']}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_nonexistent_server(self):
self.stub_out('nova.db.api.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request()
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.uuid, body=body)
def test_update_metadata(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
expected = {
'metadata': {
'key1': 'updatedvalue',
'key29': 'newkey',
}
}
req.body = jsonutils.dump_as_bytes(expected)
response = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, response)
def test_update_all(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
},
}
req.body = jsonutils.dump_as_bytes(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dump_as_bytes(expected)
res_dict = self.controller.update_all(req, self.uuid, body=expected)
self.assertEqual(expected, res_dict)
def test_update_all_empty_body_item(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=None)
def test_update_all_with_non_dict_item(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=body)
def test_update_all_malformed_container(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_malformed_data(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=expected)
def test_update_all_nonexistent_server(self):
self.stub_out('nova.db.api.instance_get', return_server_nonexistent)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
def test_update_all_non_dict(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
body = {"metadata": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex, self.controller.update_all,
req, self.uuid, body=body)
def test_update_item(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
expected = {"meta": {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_server(self):
self.stub_out('nova.db.api.instance_get_by_uuid',
return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_empty_body(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=None)
def test_update_malformed_container(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'meta': {}}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_malformed_data(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': ['asdf']}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=expected)
def test_update_item_empty_key(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, '',
body=body)
def test_update_item_key_too_long(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, ("a" * 260), body=body)
def test_update_item_value_too_long(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex_large,
self.controller.update,
req, self.uuid, "key1", body=body)
def test_update_item_too_many_keys(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'key1',
body=body)
def test_update_item_body_uri_mismatch(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_item_non_dict(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": None}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=body)
def test_update_empty_container(self):
self.stub_out('nova.db.instance_metadata_update',
return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': {}}
req.body = jsonutils.dump_as_bytes(expected)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.update, req, self.uuid, 'bad',
body=expected)
def test_too_many_metadata_items_on_create(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota.metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, self.uuid, body=data)
def test_invalid_metadata_items_on_create(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.create, req, self.uuid, body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=data)
def test_too_many_metadata_items_on_update_item(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota.metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
req, self.uuid, body=data)
def test_invalid_metadata_items_on_update_item(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota.metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = self._get_request()
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes(data)
req.headers["content-type"] = "application/json"
# test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex_large,
self.controller.update_all, req, self.uuid,
body=data)
# test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(self.validation_ex,
self.controller.update_all, req, self.uuid,
body=data)
class BadStateServerMetaDataTestV21(test.TestCase):
    """Metadata operations against an instance in a conflicting state.

    The DB stubs below return a fake instance stuck in the BUILDING
    vm_state, so mutating metadata calls should surface HTTP 409.
    """
    def setUp(self):
        super(BadStateServerMetaDataTestV21, self).setUp()
        fakes.stub_out_key_pair_funcs(self)
        self.stub_out('nova.db.api.instance_metadata_get',
                      return_server_metadata)
        self.stub_out(
            'nova.compute.rpcapi.ComputeAPI.change_instance_metadata',
            fake_change_instance_metadata)
        self.stub_out('nova.db.api.instance_get', self._return_server_in_build)
        self.stub_out('nova.db.api.instance_get_by_uuid',
                      self._return_server_in_build_by_uuid)
        self.stub_out('nova.db.api.instance_metadata_delete',
                      delete_server_metadata)
        self._set_up_resources()
    def _set_up_resources(self):
        # Hook point: subclasses can swap in a different controller/URL.
        self.controller = server_metadata_v21.ServerMetadataController()
        self.uuid = uuids.fake
        self.url = '/fake/servers/%s/metadata' % self.uuid
    def _get_request(self, param_url=''):
        # Build a v2.1 request for the metadata URL plus an optional suffix.
        return fakes.HTTPRequestV21.blank(self.url + param_url)
    def test_invalid_state_on_delete(self):
        """DELETE while the instance is still building -> 409."""
        req = self._get_request('/key2')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
                          req, self.uuid, 'key2')
    def test_invalid_state_on_update_metadata(self):
        """POST (merge) while the instance is still building -> 409."""
        self.stub_out('nova.db.api.instance_metadata_update',
                      return_create_instance_metadata)
        req = self._get_request()
        req.method = 'POST'
        req.content_type = 'application/json'
        expected = {
            'metadata': {
                'key1': 'updatedvalue',
                'key29': 'newkey',
            }
        }
        req.body = jsonutils.dump_as_bytes(expected)
        self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
                          req, self.uuid, body=expected)
    def _return_server_in_build(self, context, server_id,
                                columns_to_join=None):
        # Fake DB row for an instance that is still in the BUILDING state.
        return fake_instance.fake_db_instance(
            **{'id': server_id,
               'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
               'name': 'fake',
               'locked': False,
               'vm_state': vm_states.BUILDING})
    def _return_server_in_build_by_uuid(self, context, server_uuid,
                                        columns_to_join=None, use_slave=False):
        # Same BUILDING instance, looked up by uuid instead of id.
        return fake_instance.fake_db_instance(
            **{'id': 1,
               'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
               'name': 'fake',
               'locked': False,
               'vm_state': vm_states.BUILDING})
    @mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
                       side_effect=exception.InstanceIsLocked(instance_uuid=0))
    def test_instance_lock_update_metadata(self, mock_update):
        """InstanceIsLocked from the compute API also maps to 409."""
        req = self._get_request()
        req.method = 'POST'
        req.content_type = 'application/json'
        expected = {
            'metadata': {
                'keydummy': 'newkey',
            }
        }
        req.body = jsonutils.dump_as_bytes(expected)
        self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
                          req, self.uuid, body=expected)
class ServerMetaPolicyEnforcementV21(test.NoDBTestCase):
    """Verify every server-metadata API action enforces its policy rule."""

    def setUp(self):
        super(ServerMetaPolicyEnforcementV21, self).setUp()
        self.controller = server_metadata_v21.ServerMetadataController()
        self.req = fakes.HTTPRequest.blank('')

    def _check_policy_failed(self, rule_name, action, *args, **kwargs):
        # Point the rule at a project the fake request can never match, then
        # confirm the action is rejected with the canonical policy message.
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, action, self.req, *args, **kwargs)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_create_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:create",
            self.controller.create, fakes.FAKE_UUID,
            body={'metadata': {}})

    def test_index_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:index",
            self.controller.index, fakes.FAKE_UUID)

    def test_update_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:update",
            self.controller.update, fakes.FAKE_UUID, fakes.FAKE_UUID,
            body={'meta': {'fake_meta': 'fake_meta'}})

    def test_update_all_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:update_all",
            self.controller.update_all, fakes.FAKE_UUID,
            body={'metadata': {}})

    def test_delete_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:delete",
            self.controller.delete, fakes.FAKE_UUID, fakes.FAKE_UUID)

    def test_show_policy_failed(self):
        self._check_policy_failed(
            "os_compute_api:server-metadata:show",
            self.controller.show, fakes.FAKE_UUID, fakes.FAKE_UUID)
| {
"content_hash": "b1c3da5d72eb7df30ba216d0682c51f6",
"timestamp": "",
"source": "github",
"line_count": 800,
"max_line_length": 79,
"avg_line_length": 39.13125,
"alnum_prop": 0.5790768247883724,
"repo_name": "gooddata/openstack-nova",
"id": "a03a42aeb6e288b1a10045654cf48ed57558bce4",
"size": "31941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_server_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
} |
"""
So why would we use a decorator?
This example is fairly contrived, but it puts some control flow
into a decorator to catch extra input that the sample() function
isn't written to accept.
"""
def mydecorator(func):
    """Decorator that guards ``func`` against unexpected arguments.

    If the wrapped function is called with any positional or keyword
    arguments, they are silently discarded and ``func`` is invoked with
    no arguments instead of raising a ``TypeError``.
    """
    # Parenthesized print works identically on Python 2 and 3.
    print("Decorator is go!")

    def function_wrapper(*args, **kwargs):
        """Run ``func``, dropping any arguments it wasn't written to accept."""
        # Empty tuples/dicts are falsy, so a plain truth test replaces the
        # original explicit len() checks (as the original comment hinted).
        if args or kwargs:
            return func()
        else:
            return func(*args, **kwargs)
    return function_wrapper
# Using the decorator.
@mydecorator
def sample():
    """Announce that the body actually ran, then return a marker value."""
    # print(...) with one argument behaves the same on Python 2 and 3,
    # replacing the Python-2-only `print "..."` statement form.
    print("instantiating sample function")
    return 'sample'

# This would break if it wasn't wrapped/decorated!
print(sample('too', 'many', 'arguments'))
| {
"content_hash": "befa08001a9f7b1952e21c701f43cbdf",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 28.55263157894737,
"alnum_prop": 0.6516129032258065,
"repo_name": "razzius/PyClassLessons",
"id": "f5be97589c1adc9c0d2d5f0c33cea15b2946912c",
"size": "1085",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "instructors/lessons/higher_order_functions/examples/decorator3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3255"
},
{
"name": "HTML",
"bytes": "523249"
},
{
"name": "Jupyter Notebook",
"bytes": "480354"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "Perl",
"bytes": "34109"
},
{
"name": "Python",
"bytes": "462405"
},
{
"name": "Shell",
"bytes": "263"
}
],
"symlink_target": ""
} |
"""
This program is not to be executed from the command line. It is
exec'd by mpdman to support mpigdb.
"""
# workaround to suppress deprecated module warnings in python2.6
# see https://trac.mcs.anl.gov/projects/mpich2/ticket/362 for tracking
import warnings
warnings.filterwarnings('ignore', '.*the popen2 module is deprecated.*', DeprecationWarning)
from time import ctime
__author__ = "Ralph Butler and Rusty Lusk"
__date__ = ctime()
__version__ = "$Revision: 1.17 $"
__credits__ = ""
from sys import argv, exit, stdin, stdout, stderr
from os import kill, getpid, write, strerror
from popen2 import Popen4
from signal import signal, SIGUSR1, SIGINT, SIGKILL
from errno import EINTR
from select import select, error
from re import findall, sub
from mpdlib import mpd_set_my_id, mpd_print
global appPid, gdbPid

def sig_handler(signum, frame):
    """Signal handler: forward SIGINT to the app; SIGUSR1 kills gdb and app.

    appPid/gdbPid are module globals set later in the __main__ block.
    Fix: the global statement previously named 'gdbPID', a variable that
    never exists anywhere in this module — the real global is 'gdbPid'.
    """
    global appPid, gdbPid
    if signum == SIGINT:
        try:
            kill(appPid, SIGINT)
        except Exception:
            # appPid may not be set yet, or the process may be gone already;
            # either way a best-effort kill is all we can do from a handler.
            pass
    elif signum == SIGUSR1:
        try:
            kill(gdbPid, SIGKILL)
        except Exception:
            pass
        try:
            kill(appPid, SIGKILL)
        except Exception:
            pass
# Driver loop: spawn gdb as a child, normalize its prompt/confirm settings,
# then relay lines between the user (stdin/stdout) and gdb, intercepting the
# 'run' command to discover the debugged application's pid.
if __name__ == '__main__': # so I can be imported by pydoc
    signal(SIGINT,sig_handler)
    signal(SIGUSR1,sig_handler)
    mpd_set_my_id('mpdgdbdrv')
    ## mpd_print(1,"RMB:GDBDRV: ARGS=%s" % argv)
    if argv[1] == '-attach':
        gdb_args = '%s %s' % (argv[2],argv[3])  # userpgm and userpid
    else:
        if len(argv) > 2:
            mpd_print(1, "when using gdb, pass cmd-line args to user pgms via the 'run' cmd")
            exit(-1)
        gdb_args = argv[1]
    # Spawn gdb quietly; tochild/fromchild give us its stdin and merged
    # stdout+stderr streams.
    gdb_info = Popen4('gdb -q %s' % (gdb_args), 0 )
    gdbPid = gdb_info.pid
    # print "PID=%d GDBPID=%d" % (getpid(),gdbPid) ; stdout.flush()
    gdb_sin = gdb_info.tochild
    gdb_sin_fileno = gdb_sin.fileno()
    gdb_sout_serr = gdb_info.fromchild
    gdb_sout_serr_fileno = gdb_sout_serr.fileno()
    # Force a known prompt string so responses can be recognized below.
    write(gdb_sin_fileno,'set prompt (gdb)\\n\n')
    gdb_line = gdb_sout_serr.readline()
    # check if gdb reports any errors
    if findall(r'.*: No such file or directory.',gdb_line) != []:
        print gdb_line, ; stdout.flush()
        exit(-1)
    mpd_print(0000, "LINE1=|%s|" % (gdb_line.rstrip()))
    write(gdb_sin_fileno,'set confirm off\n')
    gdb_line = gdb_sout_serr.readline()
    mpd_print(0000, "LINE2=|%s|" % (gdb_line.rstrip()))
    write(gdb_sin_fileno,'handle SIGUSR1 nostop noprint\n')
    gdb_line = gdb_sout_serr.readline()
    mpd_print(0000, "LINE3=|%s|" % (gdb_line.rstrip()))
    write(gdb_sin_fileno,'handle SIGPIPE nostop noprint\n')
    gdb_line = gdb_sout_serr.readline()
    mpd_print(0000, "LINE4=|%s|" % (gdb_line.rstrip()))
    write(gdb_sin_fileno,'set confirm on\n')
    gdb_line = gdb_sout_serr.readline()
    mpd_print(0000, "LINE5=|%s|" % (gdb_line.rstrip()))
    # Echo a sentinel and drain output until it appears, so the streams are
    # in a known state before proceeding.
    write(gdb_sin_fileno,'echo hi1\n')
    gdb_line = gdb_sout_serr.readline()
    # mpd_print(0000, "LINE6=|%s|" % (gdb_line.rstrip()))
    # gdb_line = ''
    while not gdb_line.startswith('hi1'):
        gdb_line = gdb_sout_serr.readline()
        mpd_print(0000, "LINEx=|%s|" % (gdb_line.rstrip()))
    if argv[1] != '-attach':
        # Not attaching: set a breakpoint in main and wait (with timeout)
        # for gdb to acknowledge it.
        write(gdb_sin_fileno,'b main\n')
        gdb_line = ''
        while not gdb_line.startswith('Breakpoint'):
            try:
                (readyFDs,unused1,unused2) = select([gdb_sout_serr_fileno],[],[],10)
            except error, data:
                if data[0] == EINTR: # interrupted by timeout for example
                    continue
                else:
                    print 'mpdgdb_drv: main loop: select error: %s' % strerror(data[0])
            if not readyFDs:
                mpd_print(1, 'timed out waiting for initial Breakpoint response')
                exit(-1)
            gdb_line = gdb_sout_serr.readline() # drain breakpoint response
            gdb_line = gdb_line.strip()
            mpd_print(0000, "gdb_line=|%s|" % (gdb_line.rstrip()))
        if not gdb_line.startswith('Breakpoint'):
            mpd_print(1, 'expecting "Breakpoint", got :%s:' % (gdb_line) )
            exit(-1)
        gdb_line = gdb_sout_serr.readline() # drain prompt
        mpd_print(0000, "gdb_line=|%s|" % (gdb_line.rstrip()))
        if not gdb_line.startswith('(gdb)'):
            mpd_print(1, 'expecting "(gdb)", got :%s:' % (gdb_line) )
            exit(-1)
    print '(gdb)\n', ; stdout.flush() # initial prompt to user
    user_fileno = stdin.fileno()
    # Main relay loop: multiplex user input and gdb output with select().
    while 1:
        try:
            (readyFDs,unused1,unused2) = select([user_fileno,gdb_sout_serr_fileno],[],[],1)
        except error, data:
            if data[0] == EINTR: # interrupted by timeout for example
                continue
            else:
                mpd_print(1, 'mpdgdbdrv: main loop: select error: %s' % strerror(data[0]))
        # print "READY=", readyFDs ; stdout.flush()
        for readyFD in readyFDs:
            if readyFD == gdb_sout_serr_fileno:
                # gdb produced output: relay it to the user verbatim.
                gdb_line = gdb_sout_serr.readline()
                if not gdb_line:
                    print "MPIGDB ENDING" ; stdout.flush()
                    exit(0)
                # print "LINE |%s|" % (gdb_line.rstrip()) ; stdout.flush()
                print gdb_line, ; stdout.flush()
            elif readyFD == user_fileno:
                user_line = stdin.readline()
                # print "USERLINE=", user_line, ; stdout.flush()
                if not user_line:
                    mpd_print(1, 'mpdgdbdrv: problem: expected user input but got none')
                    exit(-1)
                if user_line.startswith('r'):
                    # 'run' (or any r-command): capture gdb's prompt and
                    # confirm settings so responses can be parsed, then
                    # discover the child's pid after the program starts.
                    write(gdb_sin_fileno,'show prompt\n')
                    gdb_line = gdb_sout_serr.readline()
                    gdb_prompt = findall(r'Gdb\'s prompt is "(.+)"\.',gdb_line)
                    if gdb_prompt == []:
                        mpd_print(1, 'expecting gdb\'s prompt, got :%s:' % (gdb_line))
                        exit(-1)
                    gdb_prompt = gdb_prompt[0]
                    # cut everything after first escape character (including it)
                    p = gdb_prompt.find("\\")
                    if p > 0:
                        gdb_prompt = gdb_prompt[0:p]
                    gdb_line = gdb_sout_serr.readline() # drain one line
                    write(gdb_sin_fileno,'show confirm\n')
                    gdb_line = gdb_sout_serr.readline()
                    gdb_confirm = findall(r'Whether to confirm potentially dangerous operations is (on|off)\.',gdb_line)
                    if gdb_confirm == []:
                        mpd_print(1, 'expecting gdb\'s confirm state, got :%s:' % (gdb_line))
                        exit(-1)
                    gdb_confirm = gdb_confirm[0]
                    gdb_line = gdb_sout_serr.readline() # drain one line
                    # set confirm to 'on' to get 'Starting program' message
                    write(gdb_sin_fileno,'set confirm on\n')
                    gdb_line = gdb_sout_serr.readline()
                    # we have already set breakpoint 1 in main
                    write(gdb_sin_fileno,user_line)
                    # ignore any warnings before starting msg
                    while 1:
                        gdb_line = gdb_sout_serr.readline() # drain one line
                        if not gdb_line.startswith('warning:'):
                            break
                        else:
                            print gdb_line, ; stdout.flush()
                    # drain starting msg
                    if not gdb_line.startswith('Starting program'):
                        mpd_print(1, 'expecting "Starting program", got :%s:' % \
                                  (gdb_line))
                        exit(-1)
                    while 1: # drain to a prompt
                        gdb_line = gdb_sout_serr.readline() # drain one line
                        if gdb_line.startswith(gdb_prompt):
                            break
                    # try to get the pid
                    write(gdb_sin_fileno,'info pid\n') # macosx
                    gdb_line = gdb_sout_serr.readline().lstrip()
                    if gdb_line.find('process ID') >= 0: # macosx
                        appPid = findall(r'.* has process ID (\d+)',gdb_line)
                        appPid = int(appPid[0])
                    else:
                        # Not macosx: fall back to 'info program' output,
                        # which differs between Linux and Solaris.
                        while 1: # drain to a prompt
                            gdb_line = gdb_sout_serr.readline() # drain one line
                            if gdb_line.startswith(gdb_prompt):
                                break
                        write(gdb_sin_fileno,'info program\n')
                        gdb_line = gdb_sout_serr.readline().lstrip()
                        if gdb_line.startswith('Using'):
                            if gdb_line.find('process') >= 0:
                                appPid = findall(r'Using .* image of child process (\d+)',gdb_line)
                            elif gdb_line.find('Thread') >= 0: # solaris
                                appPid = findall(r'Using .* image of child .* \(LWP (\d+)\).',gdb_line)
                            else:
                                mpd_print(1, 'expecting process or thread line, got :%s:' % \
                                          (gdb_line))
                                exit(-1)
                            appPid = int(appPid[0])
                        else:
                            mpd_print(1, 'expecting line with "Using"; got :%s:' % (gdb_line))
                            exit(-1)
                    while 1: # drain to a prompt
                        gdb_line = gdb_sout_serr.readline() # drain one line
                        if gdb_line.startswith(gdb_prompt):
                            break
                    # Resume the program (it stopped at breakpoint 1 in main).
                    write(gdb_sin_fileno,'c\n')
                    # set confirm back to original state
                    write(gdb_sin_fileno,'set confirm %s\n' % (gdb_confirm))
                else:
                    # Any other user command is passed straight to gdb.
                    write(gdb_sin_fileno,user_line)
| {
"content_hash": "8775dd83951d1dc0a7e867b90e142064",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 120,
"avg_line_length": 44.676991150442475,
"alnum_prop": 0.49767257601267706,
"repo_name": "gnu3ra/SCC15HPCRepast",
"id": "9bc71ad1b421d3d20d918fc82333d0475d13ec8c",
"size": "10215",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "INSTALLATION/mpich2-1.4.1p1/src/pm/mpd/mpdgdbdrv.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "166901"
},
{
"name": "Awk",
"bytes": "4270"
},
{
"name": "Batchfile",
"bytes": "59453"
},
{
"name": "C",
"bytes": "89044644"
},
{
"name": "C#",
"bytes": "171870"
},
{
"name": "C++",
"bytes": "149286410"
},
{
"name": "CMake",
"bytes": "1277735"
},
{
"name": "CSS",
"bytes": "275497"
},
{
"name": "Cuda",
"bytes": "26749"
},
{
"name": "DIGITAL Command Language",
"bytes": "396318"
},
{
"name": "FORTRAN",
"bytes": "5955918"
},
{
"name": "Groff",
"bytes": "1536123"
},
{
"name": "HTML",
"bytes": "152716955"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Java",
"bytes": "1703162"
},
{
"name": "JavaScript",
"bytes": "132031"
},
{
"name": "Lex",
"bytes": "44890"
},
{
"name": "LiveScript",
"bytes": "299224"
},
{
"name": "Logos",
"bytes": "17671"
},
{
"name": "Makefile",
"bytes": "10089555"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Objective-C",
"bytes": "4756"
},
{
"name": "PHP",
"bytes": "74480"
},
{
"name": "Perl",
"bytes": "1444604"
},
{
"name": "Perl6",
"bytes": "9917"
},
{
"name": "PostScript",
"bytes": "4003"
},
{
"name": "Pure Data",
"bytes": "1710"
},
{
"name": "Python",
"bytes": "2280373"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Scilab",
"bytes": "3012"
},
{
"name": "Shell",
"bytes": "11997985"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "696679"
},
{
"name": "Visual Basic",
"bytes": "11578"
},
{
"name": "XSLT",
"bytes": "771726"
},
{
"name": "Yacc",
"bytes": "140274"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import traceback
from contextlib import contextmanager
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.common import is_datetime64tz_dtype
import toolz
from ..core import get_deps
from ..async import get_sync
# Feature-gate on the installed pandas version: 0.19 moved the public dtype
# helpers into ``pandas.api.types``.
PANDAS_VERSION = LooseVersion(pd.__version__)
if PANDAS_VERSION >= '0.19.0':
    PANDAS_ge_0190 = True
    from pandas.api.types import is_categorical_dtype, is_scalar  # noqa
else:
    PANDAS_ge_0190 = False
    # Older releases kept is_categorical_dtype in pandas.core.common and
    # exposed the scalar check via pd.lib.
    from pandas.core.common import is_categorical_dtype  # noqa
    is_scalar = pd.lib.isscalar  # noqa
def shard_df_on_index(df, divisions):
    """ Shard a DataFrame by ranges on its index

    Yields one shard per division range, in index order.

    Examples
    --------

    >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
    >>> df
        a  b
    0   0  5
    1  10  4
    2  20  3
    3  30  2
    4  40  1

    >>> shards = list(shard_df_on_index(df, [2, 4]))
    >>> shards[0]
        a  b
    0   0  5
    1  10  4

    >>> shards[1]
        a  b
    2  20  3
    3  30  2

    >>> shards[2]
        a  b
    4  40  1

    >>> list(shard_df_on_index(df, []))[0]  # empty case
        a  b
    0   0  5
    1  10  4
    2  20  3
    3  30  2
    4  40  1
    """
    if isinstance(divisions, Iterator):
        divisions = list(divisions)
    if not len(divisions):
        # No divisions at all: the whole frame is a single shard.
        yield df
    else:
        bounds = np.array(divisions)
        frame = df.sort_index()
        idx = frame.index
        if is_categorical_dtype(idx):
            # searchsorted needs an ordered categorical index.
            idx = idx.as_ordered()
        cuts = idx.searchsorted(bounds)
        yield frame.iloc[:cuts[0]]
        for lo, hi in zip(cuts[:-1], cuts[1:]):
            yield frame.iloc[lo:hi]
        yield frame.iloc[cuts[-1]:]
def unique(divisions):
    """ Polymorphic unique function

    >>> list(unique([1, 2, 3, 1, 2, 3]))
    [1, 2, 3]

    >>> unique(np.array([1, 2, 3, 1, 2, 3]))
    array([1, 2, 3])

    >>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
    [Alice, Bob]
    Categories (2, object): [Alice, Bob]
    """
    # Branches are mutually exclusive, so dispatch order is free here.
    if isinstance(divisions, pd.Categorical):
        codes = np.unique(divisions.codes)
        return pd.Categorical.from_codes(codes, divisions.categories,
                                         divisions.ordered)
    if isinstance(divisions, np.ndarray):
        return np.unique(divisions)
    if isinstance(divisions, (tuple, list, Iterator)):
        return tuple(toolz.unique(divisions))
    raise NotImplementedError()
# Boilerplate spliced into docstrings by insert_meta_param_description():
# _META_TYPES is the parameter type line, _META_DESCRIPTION the wrapped body.
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
    """Replace `$META` in docstring with param description.

    If pad keyword is provided, will pad description by that number of
    spaces (default is 8)."""
    if not args:
        # Called as a decorator factory: defer with kwargs captured.
        return lambda f: insert_meta_param_description(f, **kwargs)
    func = args[0]
    doc = func.__doc__
    if doc:
        pad = " " * kwargs.get('pad', 8)
        wrapped = textwrap.wrap(_META_DESCRIPTION, initial_indent=pad,
                                subsequent_indent=pad, width=78)
        replacement = '{0}\n{1}'.format(_META_TYPES, '\n'.join(wrapped))
        func.__doc__ = doc.replace('$META', replacement)
    return func
@contextmanager
def raise_on_meta_error(funcname=None):
    """Reraise errors in this block to show metadata inference failure.

    Parameters
    ----------
    funcname : str, optional
        If provided, will be added to the error message to indicate the
        name of the method that failed.
    """
    try:
        yield
    except Exception as e:
        tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
        where = " in `{0}`".format(funcname) if funcname else ""
        template = ("Metadata inference failed{0}.\n\n"
                    "Original error is below:\n"
                    "------------------------\n"
                    "{1}\n\n"
                    "Traceback:\n"
                    "---------\n"
                    "{2}")
        raise ValueError(template.format(where, repr(e), tb))
def make_meta(x, index=None):
    """Create an empty pandas object containing the desired metadata.

    Parameters
    ----------
    x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
        To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
        an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
        tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
        should match the desired output. If a dtype or scalar, a scalar of the
        same dtype is returned.
    index : pd.Index, optional
        Any pandas index to use in the metadata. If none provided, a
        `RangeIndex` will be used.

    Examples
    --------
    >>> make_meta([('a', 'i8'), ('b', 'O')])
    Empty DataFrame
    Columns: [a, b]
    Index: []
    >>> make_meta(('a', 'f8'))
    Series([], Name: a, dtype: float64)
    >>> make_meta('i8')
    1
    """
    if hasattr(x, '_meta'):
        # Already a dask object; its metadata is precomputed.
        return x._meta
    if isinstance(x, (pd.Series, pd.DataFrame)):
        return x.iloc[0:0]
    elif isinstance(x, pd.Index):
        return x[0:0]
    index = index if index is None else index[0:0]
    if isinstance(x, dict):
        return pd.DataFrame({c: pd.Series([], dtype=d)
                             for (c, d) in x.items()},
                            index=index)
    elif isinstance(x, tuple) and len(x) == 2:
        return pd.Series([], dtype=x[1], name=x[0], index=index)
    elif isinstance(x, (list, tuple)):
        if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
            raise ValueError("Expected iterable of tuples of (name, dtype), "
                             "got {0}".format(x))
        return pd.DataFrame({c: pd.Series([], dtype=d) for (c, d) in x},
                            columns=[c for c, d in x], index=index)
    elif not hasattr(x, 'dtype') and x is not None:
        # could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
        # want here.
        try:
            dtype = np.dtype(x)
            return _scalar_from_dtype(dtype)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Fall through to the scalar check.
            pass
    if is_pd_scalar(x):
        return _nonempty_scalar(x)
    raise TypeError("Don't know how to create metadata from {0}".format(x))
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in (pd.Int64Index, pd.Float64Index):
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
data = [start, start] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
element = idx.categories[0]
return pd.CategoricalIndex([element, element],
categories=idx.categories,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(i) for i in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
# Sample scalar per numpy dtype "kind" code, used by _scalar_from_dtype to
# fabricate nonempty placeholder data ('m'/'M' entries get cast to the exact
# dtype at use time).
_simple_fake_mapping = {
    'b': np.bool_(True),
    'V': np.void(b' '),
    'M': np.datetime64('1970-01-01'),
    'm': np.timedelta64(1),
    'S': np.str_('foo'),
    'a': np.str_('foo'),
    'U': np.unicode_('foo'),
    'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def is_pd_scalar(x):
    """Whether the object is a scalar type"""
    if np.isscalar(x):
        return True
    return isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period))
def _nonempty_series(s, idx):
    """Build a two-row pd.Series with the same dtype and name as ``s``.

    ``idx`` is the (two-element) index for the new series.
    """
    dtype = s.dtype
    if is_datetime64tz_dtype(dtype):
        # tz-aware datetimes can't live in a plain numpy array; use a list.
        stamp = pd.Timestamp('1970-01-01', tz=dtype.tz)
        values = [stamp, stamp]
    elif is_categorical_dtype(dtype):
        # Repeat the first category, preserving categories and ordering.
        first = s.cat.categories[0]
        values = pd.Categorical([first] * 2,
                                categories=s.cat.categories,
                                ordered=s.cat.ordered)
    else:
        fill = _scalar_from_dtype(dtype)
        values = np.array([fill, fill], dtype=dtype)
    return pd.Series(values, index=idx, name=s.name)
def meta_nonempty(x):
    """Create a nonempty pandas object from the given metadata.

    Returns a pandas DataFrame, Series, or Index that contains two rows
    of fake data.
    """
    if isinstance(x, pd.Index):
        return _nonempty_index(x)
    if isinstance(x, pd.Series):
        return _nonempty_series(x, _nonempty_index(x.index))
    if isinstance(x, pd.DataFrame):
        idx = _nonempty_index(x.index)
        # Build the frame with positional keys first (column labels may
        # be non-unique or unhashable-order), then restore real labels.
        series = {pos: _nonempty_series(x.iloc[:, pos], idx)
                  for pos in range(len(x.columns))}
        frame = pd.DataFrame(series, index=idx,
                             columns=np.arange(len(x.columns)))
        frame.columns = x.columns
        return frame
    if is_pd_scalar(x):
        return _nonempty_scalar(x)
    raise TypeError("Expected Index, Series, DataFrame, or scalar, "
                    "got {0}".format(type(x).__name__))
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True):
    """Compute ``dsk`` if it is a dask collection, checking that the
    computed result and the cached ``_meta`` agree with each other.

    Returns the computed pandas object, or ``dsk`` unchanged when it is
    not a dask object (no ``.dask`` attribute).
    """
    import dask.dataframe as dd
    if hasattr(dsk, 'dask'):
        # Synchronous scheduler keeps failures deterministic in tests.
        result = dsk.compute(get=get_sync)
        if isinstance(dsk, dd.Index):
            assert isinstance(result, pd.Index), type(result)
            if check_names:
                assert dsk.name == result.name
            # cache
            assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
            if check_names:
                assert dsk._meta.name == result.name
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        elif isinstance(dsk, dd.Series):
            assert isinstance(result, pd.Series), type(result)
            if check_names:
                assert dsk.name == result.name, (dsk.name, result.name)
            # cache
            assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
            if check_names:
                assert dsk._meta.name == result.name
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        elif isinstance(dsk, dd.DataFrame):
            assert isinstance(result, pd.DataFrame), type(result)
            assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
            if check_names:
                tm.assert_index_equal(dsk.columns, result.columns)
            # cache
            assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
            if check_names:
                tm.assert_index_equal(dsk._meta.columns, result.columns)
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        elif isinstance(dsk, dd.core.Scalar):
            assert (np.isscalar(result) or
                    isinstance(result, (pd.Timestamp, pd.Timedelta)))
            if check_dtypes:
                assert_dask_dtypes(dsk, result)
        else:
            msg = 'Unsupported dask instance {0} found'.format(type(dsk))
            raise AssertionError(msg)
        return result
    return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(a, b, check_names=True, check_dtypes=True,
              check_divisions=True, check_index=True, **kwargs):
    """Assert that two dask/pandas objects are equivalent.

    Either argument may be a dask collection (it is computed and its
    metadata validated first) or a plain pandas object / scalar.
    Rows are sorted before comparison, so partition order is ignored.
    """
    if check_divisions:
        assert_divisions(a)
        assert_divisions(b)
    assert_sane_keynames(a)
    assert_sane_keynames(b)
    a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
    b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
    if not check_index:
        # Compare positionally when the index itself is not under test.
        a = a.reset_index(drop=True)
        b = b.reset_index(drop=True)
    if isinstance(a, pd.DataFrame):
        a = _maybe_sort(a)
        b = _maybe_sort(b)
        tm.assert_frame_equal(a, b, **kwargs)
    elif isinstance(a, pd.Series):
        a = _maybe_sort(a)
        b = _maybe_sort(b)
        tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
    elif isinstance(a, pd.Index):
        tm.assert_index_equal(a, b, **kwargs)
    else:
        # Scalars: exact equality, then NaN-aware / approximate fallback.
        if a == b:
            return True
        else:
            if np.isnan(a):
                assert np.isnan(b)
            else:
                assert np.allclose(a, b)
    return True
def eq(*args, **kwargs):
    """Deprecated alias for :func:`assert_eq`."""
    # Bug fix: the old message told users to switch to a nonexistent
    # 'assert_frame'; the actual replacement is assert_eq.
    warnings.warn('eq is deprecated. Use assert_eq instead', UserWarning)
    assert_eq(*args, **kwargs)
def assert_dask_graph(dask, label):
    """Assert that some key in the dask graph starts with ``label``.

    ``dask`` may be a dask collection (anything with a ``.dask``
    attribute) or a plain graph dict.  Returns True when a matching key
    is found; raises AssertionError otherwise.
    """
    if hasattr(dask, 'dask'):
        dask = dask.dask
    assert isinstance(dask, dict)
    for k in dask:
        if isinstance(k, tuple):
            # Keys like ('name-token', i): match on the name part only.
            k = k[0]
        if k.startswith(label):
            return True
    # Bug fix: the old message was misspelled ("contan"); also dropped the
    # redundant for/else, since the loop body contains no break.
    raise AssertionError(
        "given dask graph doesn't contain label: {0}".format(label))
def assert_divisions(ddf):
    """Check that every partition of ``ddf`` lies within its divisions.

    No-op for non-dask objects, index-less objects, or collections with
    unknown divisions.
    """
    if not hasattr(ddf, 'divisions'):
        return
    if not hasattr(ddf, 'index'):
        return
    if not ddf.known_divisions:
        return
    index = lambda x: x if isinstance(x, pd.Index) else x.index
    results = get_sync(ddf.dask, ddf._keys())
    # All partitions but the last have a half-open [lo, hi) range.
    for i, df in enumerate(results[:-1]):
        if len(df):
            assert index(df).min() >= ddf.divisions[i]
            assert index(df).max() < ddf.divisions[i + 1]
    # The final partition is inclusive on both ends.
    if len(results[-1]):
        assert index(results[-1]).min() >= ddf.divisions[-2]
        assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
    """Check that every key name in ``ddf``'s graph is a short, sane string."""
    if not hasattr(ddf, 'dask'):
        return
    for key in ddf.dask.keys():
        # Tuple keys like (('name', i), j) nest; unwrap to the name part.
        name = key
        while isinstance(name, tuple):
            name = name[0]
        assert isinstance(name, (str, bytes))
        assert len(name) < 100
        assert ' ' not in name
        if sys.version_info[0] >= 3:
            assert name.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
    """Check that the dask metadata matches the result.

    If `numeric_equal`, integer and floating dtypes compare equal. This is
    useful due to the implicit conversion of integer to floating upon
    encountering missingness, which is hard to infer statically."""
    eq_types = {'O', 'S', 'U', 'a'}  # treat object and strings alike
    if numeric_equal:
        eq_types.update(('i', 'f'))

    def _compatible(a, b):
        # dtypes agree when equal, or when both kinds are in one class.
        return (a.kind in eq_types and b.kind in eq_types) or (a == b)

    if isinstance(res, pd.DataFrame):
        dtype_pairs = pd.concat([ddf._meta.dtypes, res.dtypes], axis=1)
        for col, a, b in dtype_pairs.itertuples():
            assert _compatible(a, b)
    elif isinstance(res, (pd.Series, pd.Index)):
        assert _compatible(ddf._meta.dtype, res.dtype)
    elif hasattr(ddf._meta, 'dtype'):
        a = ddf._meta.dtype
        if hasattr(res, 'dtype'):
            b = res.dtype
        else:
            # Plain python scalar: infer its numpy dtype from the type.
            assert np.isscalar(res)
            b = np.dtype(type(res))
        assert _compatible(a, b)
    else:
        assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
    """Assert the largest dependency count in ``x``'s graph is ``n``
    (exactly when ``eq``, at most otherwise)."""
    dependencies, dependents = get_deps(x.dask)
    most = max(map(len, dependencies.values()))
    if eq:
        assert most == n
    else:
        assert most <= n
| {
"content_hash": "79b6518bc82111284f2908109b18fce9",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 79,
"avg_line_length": 33.45353159851301,
"alnum_prop": 0.5615623958217579,
"repo_name": "jeffery-do/Vizdoombot",
"id": "b57fcdcb161b70c298336c9b00d21382d22134b3",
"size": "17998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doom/lib/python3.5/site-packages/dask/dataframe/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "465717"
},
{
"name": "C++",
"bytes": "219269"
},
{
"name": "CSS",
"bytes": "7132"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "FORTRAN",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "7089"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "37513702"
},
{
"name": "Shell",
"bytes": "3838"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL routes for the userprofile app, using the old-style Django
# string-dotted-path view syntax (patterns() was removed in Django 1.10).
urlpatterns = patterns('',
    # Examples:
    url(r'^profile/$', 'userprofile.views.profile', name='profile'),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "da7afa5d66a44dc2f4014e258f60c3cf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 32.5625,
"alnum_prop": 0.6986564299424184,
"repo_name": "The-WebOps-Club/example",
"id": "25226184f673be7cb66d22e278e9b7abfdd6f489",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10467"
}
],
"symlink_target": ""
} |
"""
Tests For Scheduler
"""
import mock
from oslo_config import cfg
from oslo_log import log as logging
from cinder import context
from cinder import db
from cinder import exception
from cinder.scheduler import driver
from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
CONF = cfg.CONF
class SchedulerManagerTestCase(test.TestCase):
    """Test case for scheduler manager."""
    # Subclasses may override these to exercise a different manager/driver.
    manager_cls = manager.SchedulerManager
    driver_cls = driver.Scheduler
    driver_cls_name = 'cinder.scheduler.driver.Scheduler'
    class AnException(Exception):
        pass
    def setUp(self):
        super(SchedulerManagerTestCase, self).setUp()
        self.flags(scheduler_driver=self.driver_cls_name)
        self.manager = self.manager_cls()
        # Disable the startup delay so tests don't sleep by default.
        self.manager._startup_delay = False
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
        self.fake_args = (1, 2, 3)
        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
    def test_1_correct_init(self):
        # Correct scheduler driver
        manager = self.manager
        self.assertIsInstance(manager.driver, self.driver_cls)
    @mock.patch('eventlet.sleep')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities')
    def test_init_host_with_rpc(self, publish_capabilities_mock, sleep_mock):
        # init_host_with_rpc should publish capabilities, wait one periodic
        # interval, then clear the startup delay flag.
        self.manager._startup_delay = True
        self.manager.init_host_with_rpc()
        publish_capabilities_mock.assert_called_once_with(mock.ANY)
        sleep_mock.assert_called_once_with(CONF.periodic_interval)
        self.assertFalse(self.manager._startup_delay)
    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_empty_dict(self, _mock_update_cap):
        # Test no capabilities passes empty dictionary
        service = 'fake_service'
        host = 'fake_host'
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host)
        _mock_update_cap.assert_called_once_with(service, host, {})
    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities_correct(self, _mock_update_cap):
        # Test capabilities passes correctly
        service = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.manager.update_service_capabilities(self.context,
                                                 service_name=service,
                                                 host=host,
                                                 capabilities=capabilities)
        _mock_update_cap.assert_called_once_with(service, host, capabilities)
    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.db.volume_update')
    def test_create_volume_exception_puts_volume_in_error_state(
            self, _mock_volume_update, _mock_sched_create):
        # Test NoValidHost exception behavior for create_volume.
        # Puts the volume in 'error' state and eats the exception.
        _mock_sched_create.side_effect = exception.NoValidHost(reason="")
        fake_volume_id = 1
        topic = 'fake_topic'
        request_spec = {'volume_id': fake_volume_id}
        self.manager.create_volume(self.context, topic, fake_volume_id,
                                   request_spec=request_spec,
                                   filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    fake_volume_id,
                                                    {'status': 'error'})
        _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                   {})
    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('eventlet.sleep')
    def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
        # With no startup delay the scheduler must not sleep before scheduling.
        fake_volume_id = 1
        topic = 'fake_topic'
        request_spec = {'volume_id': fake_volume_id}
        self.manager.create_volume(self.context, topic, fake_volume_id,
                                   request_spec=request_spec,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                   {})
        self.assertFalse(_mock_sleep.called)
    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep,
                                                         _mock_is_ready,
                                                         _mock_sched_create):
        # During the startup delay, scheduling waits (1s sleeps) until the
        # driver reports ready; here readiness arrives on the third poll.
        self.manager._startup_delay = True
        fake_volume_id = 1
        topic = 'fake_topic'
        request_spec = {'volume_id': fake_volume_id}
        _mock_is_ready.side_effect = [False, False, True]
        self.manager.create_volume(self.context, topic, fake_volume_id,
                                   request_spec=request_spec,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                   {})
        calls = [mock.call(1)] * 2
        _mock_sleep.assert_has_calls(calls)
        self.assertEqual(2, _mock_sleep.call_count)
    @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
    @mock.patch('cinder.scheduler.driver.Scheduler.is_ready')
    @mock.patch('eventlet.sleep')
    def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep,
                                                    _mock_is_ready,
                                                    _mock_sched_create):
        # If the driver is ready on the first poll no sleep should happen.
        self.manager._startup_delay = True
        fake_volume_id = 1
        topic = 'fake_topic'
        request_spec = {'volume_id': fake_volume_id}
        _mock_is_ready.return_value = True
        self.manager.create_volume(self.context, topic, fake_volume_id,
                                   request_spec=request_spec,
                                   filter_properties={})
        _mock_sched_create.assert_called_once_with(self.context, request_spec,
                                                   {})
        self.assertFalse(_mock_sleep.called)
    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_host_passes):
        # Test NoValidHost exception behavior for migrate_volume_to_host.
        # Puts the volume in 'error_migrating' state and eats the exception.
        _mock_host_passes.side_effect = exception.NoValidHost(reason="")
        fake_volume_id = 1
        topic = 'fake_topic'
        request_spec = {'volume_id': fake_volume_id}
        self.manager.migrate_volume_to_host(self.context, topic,
                                            fake_volume_id, 'host', True,
                                            request_spec=request_spec,
                                            filter_properties={})
        _mock_volume_update.assert_called_once_with(self.context,
                                                    fake_volume_id,
                                                    {'migration_status': None})
        _mock_host_passes.assert_called_once_with(self.context, 'host',
                                                  request_spec, {})
    def test_chance_simple_scheduler_mocked(self):
        # Test FilterScheduler is loaded and predefined combination
        # of filters and weighers overrides the default value of config option
        # scheduler_default_filters and scheduler_default_weighers when
        # ChanceScheduler or SimpleScheduler is configured as scheduler_driver.
        chance = 'cinder.scheduler.chance.ChanceScheduler'
        simple = 'cinder.scheduler.simple.SimpleScheduler'
        default_filters = ['AvailabilityZoneFilter',
                           'CapacityFilter',
                           'CapabilitiesFilter']
        self.flags(scheduler_driver=chance,
                   scheduler_default_filters=['CapacityFilter'],
                   scheduler_default_weighers=['CapacityWeigher'])
        self.manager = self.manager_cls()
        self.assertTrue(isinstance(self.manager.driver,
                                   filter_scheduler.FilterScheduler))
        self.assertEqual(CONF.scheduler_default_filters,
                         default_filters)
        self.assertEqual(CONF.scheduler_default_weighers,
                         ['ChanceWeigher'])
        self.flags(scheduler_driver=simple,
                   scheduler_default_filters=['CapacityFilter'],
                   scheduler_default_weighers=['CapacityWeigher'])
        self.manager = self.manager_cls()
        self.assertTrue(isinstance(self.manager.driver,
                                   filter_scheduler.FilterScheduler))
        self.assertEqual(CONF.scheduler_default_filters,
                         default_filters)
        self.assertEqual(CONF.scheduler_default_weighers,
                         ['AllocatedCapacityWeigher'])
    @mock.patch('cinder.db.volume_update')
    @mock.patch('cinder.db.volume_get')
    def test_retype_volume_exception_returns_volume_state(self, _mock_vol_get,
                                                          _mock_vol_update):
        # Test NoValidHost exception behavior for retype.
        # Puts the volume in original state and eats the exception.
        fake_volume_id = 1
        topic = 'fake_topic'
        volume_id = fake_volume_id
        request_spec = {'volume_id': fake_volume_id, 'volume_type': {'id': 3},
                        'migration_policy': 'on-demand'}
        vol_info = {'id': fake_volume_id, 'status': 'in-use',
                    'volume_attachment': [{'id': 'fake_id',
                                           'instance_uuid': 'foo',
                                           'attached_host': None}]}
        _mock_vol_get.return_value = vol_info
        _mock_vol_update.return_value = {'status': 'in-use'}
        _mock_find_retype_host = mock.Mock(
            side_effect=exception.NoValidHost(reason=""))
        # Patch the driver instance by hand and restore it afterwards.
        orig_retype = self.manager.driver.find_retype_host
        self.manager.driver.find_retype_host = _mock_find_retype_host
        self.manager.retype(self.context, topic, volume_id,
                            request_spec=request_spec,
                            filter_properties={})
        _mock_vol_get.assert_called_once_with(self.context, fake_volume_id)
        _mock_find_retype_host.assert_called_once_with(self.context,
                                                       request_spec, {},
                                                       'on-demand')
        _mock_vol_update.assert_called_once_with(self.context, fake_volume_id,
                                                 {'status': 'in-use'})
        self.manager.driver.find_retype_host = orig_retype
    def test_create_consistencygroup_exceptions(self):
        # CinderException should mark the group 'error' and re-raise;
        # NoValidHost should mark the group 'error' but be swallowed.
        with mock.patch.object(filter_scheduler.FilterScheduler,
                               'schedule_create_consistencygroup') as mock_cg:
            original_driver = self.manager.driver
            self.manager.driver = filter_scheduler.FilterScheduler
            LOG = logging.getLogger('cinder.scheduler.manager')
            self.stubs.Set(LOG, 'error', mock.Mock())
            self.stubs.Set(LOG, 'exception', mock.Mock())
            self.stubs.Set(db, 'consistencygroup_update', mock.Mock())
            ex = exception.CinderException('test')
            mock_cg.side_effect = ex
            group_id = '1'
            self.assertRaises(exception.CinderException,
                              self.manager.create_consistencygroup,
                              self.context,
                              'volume',
                              group_id)
            self.assertTrue(LOG.exception.call_count > 0)
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': 'error'})
            mock_cg.reset_mock()
            LOG.exception.reset_mock()
            db.consistencygroup_update.reset_mock()
            mock_cg.side_effect = exception.NoValidHost(
                reason="No weighed hosts available")
            self.manager.create_consistencygroup(
                self.context, 'volume', group_id)
            self.assertTrue(LOG.error.call_count > 0)
            db.consistencygroup_update.assert_called_once_with(
                self.context, group_id, {'status': 'error'})
            self.manager.driver = original_driver
class SchedulerTestCase(test.TestCase):
    """Test case for base scheduler driver class."""
    # So we can subclass this test and re-use tests if we need.
    driver_cls = driver.Scheduler
    def setUp(self):
        super(SchedulerTestCase, self).setUp()
        self.driver = self.driver_cls()
        self.context = context.RequestContext('fake_user', 'fake_project')
        self.topic = 'fake_topic'
    @mock.patch('cinder.scheduler.driver.Scheduler.'
                'update_service_capabilities')
    def test_update_service_capabilities(self, _mock_update_cap):
        # Capabilities should be forwarded verbatim to the host manager.
        service_name = 'fake_service'
        host = 'fake_host'
        capabilities = {'fake_capability': 'fake_value'}
        self.driver.update_service_capabilities(service_name, host,
                                                capabilities)
        _mock_update_cap.assert_called_once_with(service_name, host,
                                                 capabilities)
    @mock.patch('cinder.scheduler.host_manager.HostManager.'
                'has_all_capabilities', return_value=False)
    def test_is_ready(self, _mock_has_caps):
        # is_ready mirrors the host manager's has_all_capabilities().
        ready = self.driver.is_ready()
        _mock_has_caps.assert_called_once_with()
        self.assertFalse(ready)
class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test cases for base scheduler driver class methods
    that will fail if the driver is changed.
    """
    def test_unimplemented_schedule(self):
        # The base driver's schedule() is abstract and must raise.
        fake_args = (1, 2, 3)
        fake_kwargs = {'cat': 'meow'}
        self.assertRaises(NotImplementedError, self.driver.schedule,
                          self.context, self.topic, 'schedule_something',
                          *fake_args, **fake_kwargs)
class SchedulerDriverModuleTestCase(test.TestCase):
    """Test case for scheduler driver module methods."""
    def setUp(self):
        super(SchedulerDriverModuleTestCase, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')
    @mock.patch('cinder.db.volume_update')
    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_volume_host_update_db(self, _mock_utcnow, _mock_vol_update):
        # volume_update_db should stamp the host and scheduled_at time.
        _mock_utcnow.return_value = 'fake-now'
        driver.volume_update_db(self.context, 31337, 'fake_host')
        _mock_vol_update.assert_called_once_with(self.context, 31337,
                                                 {'host': 'fake_host',
                                                  'scheduled_at': 'fake-now'})
| {
"content_hash": "362419881646250a3229add41107b3e3",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 45.48255813953488,
"alnum_prop": 0.5701776811964719,
"repo_name": "julianwang/cinder",
"id": "288f4ccc777221fd194cada6e23f4fc41cb213a5",
"size": "16378",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/scheduler/test_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10718052"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
import hypermap.aggregator.models
class Migration(migrations.Migration):
    """Alter Layer and Service: give csw_last_updated a "now" string
    default and make the uuid fields unique and non-editable."""
    dependencies = [
        ('aggregator', '0007_remove_uuid_null'),
    ]
    operations = [
        migrations.AlterField(
            model_name='layer',
            name='csw_last_updated',
            field=models.CharField(default=hypermap.aggregator.models.get_default_now_as_string, max_length=32, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='layer',
            name='uuid',
            field=models.UUIDField(default=uuid.uuid4, unique=True, editable=False),
        ),
        migrations.AlterField(
            model_name='service',
            name='csw_last_updated',
            field=models.CharField(default=hypermap.aggregator.models.get_default_now_as_string, max_length=32, null=True, blank=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='uuid',
            field=models.UUIDField(default=uuid.uuid4, unique=True, editable=False),
        ),
    ]
| {
"content_hash": "113691f6788550c51eab7c776126f43a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 135,
"avg_line_length": 32.51428571428571,
"alnum_prop": 0.6133567662565905,
"repo_name": "cga-harvard/hypermap",
"id": "d41830b6f40b27d832ffe054e100b9f74e21b3c6",
"size": "1162",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hypermap/aggregator/migrations/0008_alter_cws_last_updated_and_uuid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139270"
},
{
"name": "HTML",
"bytes": "28842"
},
{
"name": "Python",
"bytes": "81613"
},
{
"name": "Ruby",
"bytes": "794"
},
{
"name": "Shell",
"bytes": "522"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from tempfile import mkdtemp
import os
from shutil import rmtree
import numpy as np
from ..export_d3po import make_data_file
from ...core import Data, Subset
from ...tests.helpers import requires_astropy
@requires_astropy
def test_make_data_file():
    """make_data_file should write a CSV with data columns and a 0/1
    selection column for each subset."""
    from astropy.table import Table
    # astropy.Table interface has changed across versions. Check
    # that we build a valid table
    d = Data(x=[1, 2, 3], y=[2, 3, 4], label='data')
    s = d.new_subset(label='test')
    s.subset_state = d.id['x'] > 1
    # Renamed from 'dir', which shadowed the builtin of the same name.
    tmpdir = mkdtemp()
    try:
        make_data_file(d, (s,), tmpdir)
        t = Table.read(os.path.join(tmpdir, 'data.csv'), format='ascii')
        np.testing.assert_array_equal(t['x'], [1, 2, 3])
        np.testing.assert_array_equal(t['y'], [2, 3, 4])
        np.testing.assert_array_equal(t['selection_0'], [0, 1, 1])
    finally:
        rmtree(tmpdir, ignore_errors=True)
| {
"content_hash": "3d70050704328a26bb4ed5274fc3c978",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 69,
"avg_line_length": 27.970588235294116,
"alnum_prop": 0.6393270241850684,
"repo_name": "JudoWill/glue",
"id": "2533556f7edcb34594835b64dbd71d7585730cd6",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/plugins/tests/test_d3po.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "1387891"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
} |
import sys, os, random, gc, time, warnings
from twisted.internet import defer
from twisted.application import app
from twisted.python import usage, reflect, failure
from twisted import plugin
from twisted.python.util import spewer
from twisted.python.compat import set
from twisted.trial import runner, itrial, reporter
# Yea, this is stupid. Leave it for for command-line compatibility for a
# while, though.
# Map legacy --tbformat command-line values to the reporter format names
# actually used internally ('default', 'brief', 'verbose').
TBFORMAT_MAP = {
    'plain': 'default',
    'default': 'default',
    'emacs': 'brief',
    'brief': 'brief',
    'cgitb': 'verbose',
    'verbose': 'verbose'
    }
def _parseLocalVariables(line):
"""Accepts a single line in Emacs local variable declaration format and
returns a dict of all the variables {name: value}.
Raises ValueError if 'line' is in the wrong format.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
paren = '-*-'
start = line.find(paren) + len(paren)
end = line.rfind(paren)
if start == -1 or end == -1:
raise ValueError("%r not a valid local variable declaration" % (line,))
items = line[start:end].split(';')
localVars = {}
for item in items:
if len(item.strip()) == 0:
continue
split = item.split(':')
if len(split) != 2:
raise ValueError("%r contains invalid declaration %r"
% (line, item))
localVars[split[0].strip()] = split[1].strip()
return localVars
def loadLocalVariables(filename):
    """Accepts a filename and attempts to load the Emacs variable
    declarations from that file, simulating what Emacs does.

    See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
    """
    # Emacs only honours declarations on the first two lines of a file.
    # Use open() instead of the deprecated file() builtin, and close the
    # handle in a finally block so it isn't leaked if readline() raises.
    f = open(filename, "r")
    try:
        lines = [f.readline(), f.readline()]
    finally:
        f.close()
    for line in lines:
        try:
            return _parseLocalVariables(line)
        except ValueError:
            pass
    return {}
def getTestModules(filename):
    """Return the comma-separated test modules declared by the Emacs
    'test-case-name' variable in ``filename``, or an empty list."""
    declared = loadLocalVariables(filename).get('test-case-name', None)
    if declared is None:
        return []
    return declared.split(',')
def isTestFile(filename):
    """Returns true if 'filename' looks like a file containing unit tests.
    False otherwise. Doesn't care whether filename exists.
    """
    base = os.path.basename(filename)
    root, ext = os.path.splitext(base)
    return root.startswith('test_') and ext == '.py'
def _zshReporterAction():
    # Build a zsh completion "action" string listing the long option name
    # of every available IReporter plugin, e.g. "(verbose bwverbose text)".
    return "(%s)" % (" ".join([p.longOpt for p in plugin.getPlugins(itrial.IReporter)]),)
class Options(usage.Options, app.ReactorSelectionMixin):
synopsis = """%s [options] [[file|package|module|TestCase|testmethod]...]
""" % (os.path.basename(sys.argv[0]),)
longdesc = ("trial loads and executes a suite of unit tests, obtained "
"from modules, packages and files listed on the command line.")
optFlags = [["help", "h"],
["rterrors", "e", "realtime errors, print out tracebacks as "
"soon as they occur"],
["debug", "b", "Run tests in the Python debugger. Will load "
"'.pdbrc' from current directory if it exists."],
["debug-stacktraces", "B", "Report Deferred creation and "
"callback stack traces"],
["nopm", None, "don't automatically jump into debugger for "
"postmorteming of exceptions"],
["dry-run", 'n', "do everything but run the tests"],
["force-gc", None, "Have Trial run gc.collect() before and "
"after each test case."],
["profile", None, "Run tests under the Python profiler"],
["unclean-warnings", None,
"Turn dirty reactor errors into warnings"],
["until-failure", "u", "Repeat test until it fails"],
["no-recurse", "N", "Don't recurse into packages"],
['help-reporters', None,
"Help on available output plugins (reporters)"]
]
optParameters = [
["logfile", "l", "test.log", "log file name"],
["random", "z", None,
"Run tests in random order using the specified seed"],
['temp-directory', None, '_trial_temp',
'Path to use as working directory for tests.'],
['reporter', None, 'verbose',
'The reporter to use for this test run. See --help-reporters for '
'more info.']]
zsh_actions = {"tbformat":"(plain emacs cgitb)",
"reporter":_zshReporterAction}
zsh_actionDescr = {"logfile":"log file name",
"random":"random seed"}
zsh_extras = ["*:file|module|package|TestCase|testMethod:_files -g '*.py'"]
fallbackReporter = reporter.TreeReporter
extra = None
tracer = None
def __init__(self):
self['tests'] = set()
usage.Options.__init__(self)
def opt_coverage(self):
"""
Generate coverage information in the _trial_temp/coverage. Requires
Python 2.3.3.
"""
coverdir = 'coverage'
print "Setting coverage directory to %s." % (coverdir,)
import trace
# begin monkey patch ---------------------------
# Before Python 2.4, this function asserted that 'filename' had
# to end with '.py' This is wrong for at least two reasons:
# 1. We might be wanting to find executable line nos in a script
# 2. The implementation should use os.splitext
# This monkey patch is the same function as in the stdlib (v2.3)
# but with the assertion removed.
def find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number
table.
"""
#assert filename.endswith('.py') # YOU BASTARDS
try:
prog = open(filename).read()
prog = '\n'.join(prog.splitlines()) + '\n'
except IOError, err:
sys.stderr.write("Not printing coverage data for %r: %s\n"
% (filename, err))
sys.stderr.flush()
return {}
code = compile(prog, filename, "exec")
strs = trace.find_strings(filename)
return trace.find_lines(code, strs)
trace.find_executable_linenos = find_executable_linenos
# end monkey patch ------------------------------
self.coverdir = os.path.abspath(os.path.join(self['temp-directory'], coverdir))
self.tracer = trace.Trace(count=1, trace=0)
sys.settrace(self.tracer.globaltrace)
def opt_testmodule(self, filename):
"Filename to grep for test cases (-*- test-case-name)"
# If the filename passed to this parameter looks like a test module
# we just add that to the test suite.
#
# If not, we inspect it for an Emacs buffer local variable called
# 'test-case-name'. If that variable is declared, we try to add its
# value to the test suite as a module.
#
# This parameter allows automated processes (like Buildbot) to pass
# a list of files to Trial with the general expectation of "these files,
# whatever they are, will get tested"
if not os.path.isfile(filename):
sys.stderr.write("File %r doesn't exist\n" % (filename,))
return
filename = os.path.abspath(filename)
if isTestFile(filename):
self['tests'].add(filename)
else:
self['tests'].update(getTestModules(filename))
def opt_spew(self):
"""Print an insanely verbose log of everything that happens. Useful
when debugging freezes or locks in complex code."""
sys.settrace(spewer)
def opt_help_reporters(self):
synopsis = ("Trial's output can be customized using plugins called "
"Reporters. You can\nselect any of the following "
"reporters using --reporter=<foo>\n")
print synopsis
for p in plugin.getPlugins(itrial.IReporter):
print ' ', p.longOpt, '\t', p.description
print
sys.exit(0)
def opt_disablegc(self):
"""Disable the garbage collector"""
gc.disable()
def opt_tbformat(self, opt):
"""Specify the format to display tracebacks with. Valid formats are
'plain', 'emacs', and 'cgitb' which uses the nicely verbose stdlib
cgitb.text function"""
try:
self['tbformat'] = TBFORMAT_MAP[opt]
except KeyError:
raise usage.UsageError(
"tbformat must be 'plain', 'emacs', or 'cgitb'.")
def opt_extra(self, arg):
"""
Add an extra argument. (This is a hack necessary for interfacing with
emacs's `gud'.)
"""
if self.extra is None:
self.extra = []
self.extra.append(arg)
opt_x = opt_extra
def opt_recursionlimit(self, arg):
"""see sys.setrecursionlimit()"""
try:
sys.setrecursionlimit(int(arg))
except (TypeError, ValueError):
raise usage.UsageError(
"argument to recursionlimit must be an integer")
    def opt_random(self, option):
        # Seed used to shuffle test-case order.  A positive integer is used
        # verbatim (so runs are reproducible); 0 means "derive a seed from
        # the current time".  Note: long() is Python 2 only.
        try:
            self['random'] = long(option)
        except ValueError:
            raise usage.UsageError(
                "Argument to --random must be a positive integer")
        else:
            if self['random'] < 0:
                raise usage.UsageError(
                    "Argument to --random must be a positive integer")
            elif self['random'] == 0:
                # Wall-clock derived seed for "shuffle, any seed" requests.
                self['random'] = long(time.time() * 100)
def opt_without_module(self, option):
"""
Fake the lack of the specified modules, separated with commas.
"""
for module in option.split(","):
if module in sys.modules:
warnings.warn("Module '%s' already imported, "
"disabling anyway." % (module,),
category=RuntimeWarning)
sys.modules[module] = None
def parseArgs(self, *args):
self['tests'].update(args)
if self.extra is not None:
self['tests'].update(self.extra)
def _loadReporterByName(self, name):
for p in plugin.getPlugins(itrial.IReporter):
qual = "%s.%s" % (p.module, p.klass)
if p.longOpt == name:
return reflect.namedAny(qual)
raise usage.UsageError("Only pass names of Reporter plugins to "
"--reporter. See --help-reporters for "
"more info.")
def postOptions(self):
# Only load reporters now, as opposed to any earlier, to avoid letting
# application-defined plugins muck up reactor selecting by importing
# t.i.reactor and causing the default to be installed.
self['reporter'] = self._loadReporterByName(self['reporter'])
if 'tbformat' not in self:
self['tbformat'] = 'default'
if self['nopm']:
if not self['debug']:
raise usage.UsageError("you must specify --debug when using "
"--nopm ")
failure.DO_POST_MORTEM = False
def _initialDebugSetup(config):
    """Turn on debug hooks as early as possible."""
    # Done first so that even import failures are easy to debug.
    wants_debug = config['debug']
    if wants_debug:
        failure.startDebugMode()
    if wants_debug or config['debug-stacktraces']:
        defer.setDebugging(True)
def _getSuite(config):
    """Load the requested tests into a suite via a configured loader."""
    should_recurse = not config['no-recurse']
    return _getLoader(config).loadByNames(config['tests'], should_recurse)
def _getLoader(config):
    # Build a TestLoader configured from the parsed options.
    loader = runner.TestLoader()
    if config['random']:
        # Shuffle test order deterministically from the --random seed.
        randomer = random.Random()
        randomer.seed(config['random'])
        loader.sorter = lambda x : randomer.random()
        print 'Running tests shuffled with seed %d\n' % config['random']
    if not config['until-failure']:
        # DestructiveTestSuite discards each test after running it; when
        # looping with --until-failure the suite must stay reusable.
        loader.suiteFactory = runner.DestructiveTestSuite
    return loader
def _makeRunner(config):
    """Construct a TrialRunner from the parsed options."""
    # --dry-run takes precedence over --debug, matching the original
    # assignment order (debug assigned first, then overwritten by dry-run).
    if config['dry-run']:
        mode = runner.TrialRunner.DRY_RUN
    elif config['debug']:
        mode = runner.TrialRunner.DEBUG
    else:
        mode = None
    return runner.TrialRunner(config['reporter'],
                              mode=mode,
                              profile=config['profile'],
                              logfile=config['logfile'],
                              tracebackFormat=config['tbformat'],
                              realTimeErrors=config['rterrors'],
                              uncleanWarnings=config['unclean-warnings'],
                              workingDirectory=config['temp-directory'],
                              forceGarbageCollection=config['force-gc'])
def run():
    # Command-line entry point for trial.
    if len(sys.argv) == 1:
        # With no arguments at all, behave as if --help was requested.
        sys.argv.append("--help")
    config = Options()
    try:
        config.parseOptions()
    except usage.error, ue:  # Python 2 except syntax
        raise SystemExit, "%s: %s" % (sys.argv[0], ue)
    _initialDebugSetup(config)
    trialRunner = _makeRunner(config)
    suite = _getSuite(config)
    if config['until-failure']:
        # Keep re-running the whole suite until something fails.
        test_result = trialRunner.runUntilFailure(suite)
    else:
        test_result = trialRunner.run(suite)
    if config.tracer:
        # Stop coverage tracing and write the collected results.
        sys.settrace(None)
        results = config.tracer.results()
        results.write_results(show_missing=1, summary=False,
                              coverdir=config.coverdir)
    # Process exit status: 0 on success, 1 on any test failure.
    sys.exit(not test_result.wasSuccessful())
| {
"content_hash": "0d94c7ec7632da49ef104b8c2b2210fa",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 89,
"avg_line_length": 37.49175824175824,
"alnum_prop": 0.5755843775188686,
"repo_name": "jxta/cc",
"id": "a454d170e1efa5da3181fd006717f29bf7d41ff8",
"size": "13789",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "vendor/Twisted-10.0.0/twisted/scripts/trial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315721"
},
{
"name": "Shell",
"bytes": "7870"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import cgi
import email.utils
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
ARCHIVE_EXTENSIONS, consume, call_subprocess)
from pip.utils.encoding import auto_decode
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip.utils.glibc import libc_ver
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
    """
    Return a string representing the user agent.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    implementation = data["implementation"]
    if implementation["name"] == 'PyPy':
        if sys.pypy_version_info.releaselevel == 'final':
            pypy_version_info = sys.pypy_version_info[:3]
        else:
            pypy_version_info = sys.pypy_version_info
        implementation["version"] = ".".join(
            str(part) for part in pypy_version_info
        )
    elif implementation["name"] in ('CPython', 'Jython', 'IronPython'):
        # For Jython and IronPython this is the same "complete guess" the
        # original made; CPython's python_version() is exact.
        implementation["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        from pip._vendor import distro
        # Keep only the truthy fields, as the filter()-based original did.
        distro_infos = {
            key: value
            for key, value in zip(["name", "version", "id"],
                                  distro.linux_distribution())
            if value
        }
        libc = {
            key: value
            for key, value in zip(["lib", "version"], libc_ver())
            if value
        }
        if libc:
            distro_infos["libc"] = libc
        if distro_infos:
            data["distro"] = distro_infos

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()
    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()

    if platform.machine():
        data["cpu"] = platform.machine()

    # Python 2.6 doesn't have ssl.OPENSSL_VERSION.
    if HAS_TLS and sys.version_info[:2] > (2, 6):
        data["openssl_version"] = ssl.OPENSSL_VERSION

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
class MultiDomainBasicAuth(AuthBase):
    """Requests auth helper that remembers HTTP basic credentials per
    netloc and can prompt the user when a request comes back 401."""

    def __init__(self, prompting=True):
        # prompting: whether handle_401 may interactively ask for creds.
        self.prompting = prompting
        # netloc -> (username, password)
        self.passwords = {}

    def __call__(self, req):
        """Attach stored / URL-embedded / netrc credentials to ``req``."""
        parsed = urllib_parse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.rsplit("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        # Get creds from netrc if we still don't have them
        if username is None and password is None:
            netrc_auth = get_netrc_auth(req.url)
            username, password = netrc_auth if netrc_auth else (None, None)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        # Keep the 401 response in the retry's history, like requests does
        # for redirects.
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        """Split ``user:pass@host`` userinfo out of a netloc; returns
        (None, None) when no credentials are embedded."""
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves file:// URLs straight from disk."""

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        """Build a Response from the local filesystem; no sockets involved."""
        local_path = url_to_path(request.url)

        resp = Response()
        resp.url = request.url
        resp.status_code = 200

        try:
            file_stats = os.stat(local_path)
        except OSError as exc:
            # A missing/unreadable file becomes a 404 carrying the error.
            resp.status_code = 404
            resp.raw = exc
        else:
            guessed_type = mimetypes.guess_type(local_path)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": guessed_type,
                "Content-Length": file_stats.st_size,
                "Last-Modified": email.utils.formatdate(file_stats.st_mtime,
                                                        usegmt=True),
            })

            resp.raw = open(local_path, "rb")
            resp.close = resp.raw.close

        return resp

    def close(self):
        """Nothing to release; file handles close via the response."""
        pass
class SafeFileCache(FileCache):
    """
    A file based cache which is safe to use even when the target directory may
    not be accessible or writable.
    """

    def __init__(self, *args, **kwargs):
        """Create the cache, disabling it when the cache directory is not
        owned by the current user."""
        super(SafeFileCache, self).__init__(*args, **kwargs)

        # Check to ensure that the directory containing our cache directory
        # is owned by the user current executing pip. If it does not exist
        # we will check the parent directory until we find one that does exist.
        # If it is not owned by the user executing pip then we will disable
        # the cache and log a warning.
        if not check_path_owner(self.directory):
            logger.warning(
                "The directory '%s' or its parent directory is not owned by "
                "the current user and the cache has been disabled. Please "
                "check the permissions and owner of that directory. If "
                "executing pip with sudo, you may want sudo's -H flag.",
                self.directory,
            )

            # Set our directory to None to disable the Cache
            self.directory = None

    def _suppressed(self, method, *args, **kwargs):
        # Shared guard for get/set/delete (previously triplicated): no-op
        # when the cache is disabled, and swallow filesystem/locking errors
        # because a broken cache must never break the request it caches.
        if self.directory is None:
            return None
        try:
            return method(*args, **kwargs)
        except (LockError, OSError, IOError):
            # If we can't access the cache then we can just skip caching and
            # process the request as if caching wasn't enabled.
            pass

    def get(self, *args, **kwargs):
        """Read from the cache; None when disabled or unreadable."""
        return self._suppressed(super(SafeFileCache, self).get,
                                *args, **kwargs)

    def set(self, *args, **kwargs):
        """Write to the cache; a no-op when disabled or unwritable."""
        return self._suppressed(super(SafeFileCache, self).set,
                                *args, **kwargs)

    def delete(self, *args, **kwargs):
        """Remove a cache entry; a no-op when disabled or unwritable."""
        return self._suppressed(super(SafeFileCache, self).delete,
                                *args, **kwargs)
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables TLS certificate verification entirely."""

    def cert_verify(self, conn, url, verify, cert):
        # Force "no verification" regardless of what the caller requested.
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
class PipSession(requests.Session):
    """requests.Session preconfigured for pip: custom User-Agent,
    per-domain basic auth, retry policy, cached https adapter, and
    file:// support."""

    # Default timeout applied by request() when the caller supplies none.
    timeout = None

    def __init__(self, *args, **kwargs):
        total_retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retry_config = urllib3.Retry(
            # Total number of retries a particular request can have.
            total=total_retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way; 503 is generally a
            # transient error, so go ahead and retry it.
            status_forcelist=[503],

            # Small back off between failed requests to avoid hammering
            # the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache, use_dir_lock=True),
                max_retries=retry_config,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retry_config)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retry_config)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # Use the non-validating adapter for any host explicitly marked
        # insecure.
        for host in insecure_hosts:
            self.mount("https://{0}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Fetch the content of a filename, file: URL, or http(s) URL.

    Returns (location, content), where content is text.
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A requirements file fetched over http must not pull in local files.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme != 'file':
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()
            return resp.url, resp.text

        # file: URL -- turn it into a local path, then fall through to open().
        path = url.split(':', 1)[1]
        path = path.replace('\\', '/')
        drive_match = _url_slash_drive_re.match(path)
        if drive_match:
            # Windows drive spelled with a pipe, e.g. /C|/dir -> C:/dir
            path = drive_match.group(1) + ':' + path.split('|', 1)[1]
        path = urllib_parse.unquote(path)
        if path.startswith('/'):
            # Collapse any run of leading slashes down to exactly one.
            path = '/' + path.lstrip('/')
        url = path

    try:
        with open(url, 'rb') as f:
            content = auto_decode(f.read())
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
# Matches the scheme prefix of http/https/file URLs (case-insensitive).
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches a Windows drive letter spelled with a pipe (e.g. "/C|/") in a
# file: URL path.
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
    """Returns true if the name looks like a URL"""
    scheme, sep, _rest = name.partition(':')
    if not sep:
        # No colon at all: cannot be a URL.
        return False
    return scheme.lower() in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
    """
    Convert a file: URL to a path.
    """
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)

    split = urllib_parse.urlsplit(url)
    netloc = split.netloc
    if netloc:
        # UNC share: file://host/share -> \\host\share notation.
        netloc = '\\\\' + netloc

    return urllib_request.url2pathname(netloc + split.path)
def path_to_url(path):
    """
    Convert a path to a file: URL.  The path will be made absolute and have
    quoted path parts.
    """
    absolute = os.path.normpath(os.path.abspath(path))
    return urllib_parse.urljoin('file:', urllib_request.pathname2url(absolute))
def is_archive_file(name):
    """Return True if `name` is a considered as an archive file."""
    extension = splitext(name)[1].lower()
    return extension in ARCHIVE_EXTENSIONS
def unpack_vcs_link(link, location):
    """Check out the VCS URL in ``link`` into ``location``."""
    backend = _get_used_vcs_backend(link)
    backend.unpack(location)
def _get_used_vcs_backend(link):
    """Return an instance of the first VCS backend claiming the link's
    scheme, or None when no backend matches."""
    return next(
        (backend(link.url) for backend in vcs.backends
         if link.scheme in backend.schemes),
        None,
    )
def is_vcs_url(link):
    """True when some registered VCS backend recognises the link."""
    return True if _get_used_vcs_backend(link) else False
def is_file_url(link):
    """True when the link's URL uses the file: scheme."""
    lowered = link.url.lower()
    return lowered.startswith('file:')
def is_dir_url(link):
    """Return whether a file:// Link points to a directory.

    ``link`` must not have any other scheme but file://.  Call is_file_url()
    first.
    """
    target = url_to_path(link.url_without_fragment)
    return os.path.isdir(target)
def _progress_indicator(iterable, *args, **kwargs):
    # Default (no-op) progress indicator: passes the iterable through
    # untouched.  _download_url swaps in a progress bar/spinner when needed.
    return iterable
def _download_url(resp, link, content_file, hashes):
    """Stream the response body for ``link`` into ``content_file``,
    optionally displaying progress and checking ``hashes`` against the
    downloaded chunks."""
    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed header: treat the length as unknown.
        total_length = 0
    cached_resp = getattr(resp, "from_cache", False)
    # Show progress only for non-quiet logging, non-cached responses, and
    # transfers that are large (>40KB) or of unknown length.
    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False
    show_url = link.show_url
    def resp_read(chunk_size):
        # Yield raw (undecoded) chunks from the response body.
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we don't
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk
    def written_chunks(chunks):
        # Tee each chunk to disk while passing it through unchanged.
        for chunk in chunks:
            content_file.write(chunk)
            yield chunk
    progress_indicator = _progress_indicator
    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment
    if show_progress:  # We don't show progress on cached responses
        if total_length:
            logger.info("Downloading %s (%s)", url, format_size(total_length))
            progress_indicator = DownloadProgressBar(max=total_length).iter
        else:
            logger.info("Downloading %s", url)
            progress_indicator = DownloadProgressSpinner().iter
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)
    logger.debug('Downloading from URL %s', link)
    # Chain: raw read -> progress display -> write-to-disk tee.  The chain is
    # lazy; it is driven either by the hash checker or by consume().
    downloaded_chunks = written_chunks(
        progress_indicator(
            resp_read(CONTENT_CHUNK_SIZE),
            CONTENT_CHUNK_SIZE
        )
    )
    if hashes:
        hashes.check_against_chunks(downloaded_chunks)
    else:
        consume(downloaded_chunks)
def _copy_file(filename, location, link):
    """Copy ``filename`` into ``location`` under the link's filename,
    prompting the user when the destination already exists.

    Responses: (i)gnore keeps the existing file, (w)ipe deletes it,
    (b)ackup renames it out of the way, (a)bort exits pip.
    """
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        # Fix: the prompt previously read "(a)abort".
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
            display_path(download_location), ('i', 'w', 'b', 'a'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warning('Deleting %s', display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warning(
                'Backing up %s to %s',
                display_path(download_location),
                display_path(dest_file),
            )
            shutil.move(download_location, dest_file)
        elif response == 'a':
            sys.exit(-1)
    if copy:
        shutil.copy(filename, download_location)
        logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None,
                    session=None, hashes=None):
    """Fetch an http(s) link (or reuse a prior download) and unpack it
    into ``location``; optionally keep a copy in ``download_dir``."""
    if session is None:
        raise TypeError(
            "unpack_http_url() missing 1 required keyword argument: 'session'"
        )

    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')

    # Reuse a previously-downloaded archive when one with a valid hash is
    # already sitting in download_dir.
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link, download_dir,
                                                      hashes)

    if already_downloaded_path:
        from_path = already_downloaded_path
        content_type = mimetypes.guess_type(from_path)[0]
    else:
        # Nothing cached: download into the scratch directory.
        from_path, content_type = _download_http_url(link, session, temp_dir,
                                                     hashes)

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified; let's copy the archive there
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)

    if not already_downloaded_path:
        os.unlink(from_path)
    rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None, hashes=None):
    """Unpack link into location.

    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir.
    """
    link_path = url_to_path(link.url_without_fragment)

    # A local directory: copy the tree; nothing to hash or download.
    if is_dir_url(link):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(link_path)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link, download_dir,
                                                      hashes)

    # Prefer the already-downloaded (hash-checked) copy when present.
    from_path = already_downloaded_path or link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, link)
def _copy_dist_from_dir(link_path, location):
    """Copy distribution files in `link_path` to `location`.

    Invoked when user requests to install a local directory. E.g.:

        pip install .
        pip install ~/dev/git-repos/python-prompt-toolkit
    """
    # Note: This is currently VERY SLOW if you have a lot of data in the
    # directory, because it copies everything with `shutil.copytree`.
    # What it should really do is build an sdist and install that.
    # See https://github.com/pypa/pip/issues/2195
    if os.path.isdir(location):
        rmtree(location)

    # Build an sdist of the directory via the setuptools shim.
    setup_py = 'setup.py'
    sdist_args = [
        sys.executable,
        '-c', SETUPTOOLS_SHIM % setup_py,
        'sdist',
        '--dist-dir', location,
    ]
    logger.info('Running setup.py sdist for %s', link_path)

    with indent_log():
        call_subprocess(sdist_args, cwd=link_path, show_stdout=False)

    # The dist dir now holds exactly one file: the freshly built sdist.
    sdist = os.path.join(location, os.listdir(location)[0])
    logger.info('Unpacking sdist %s into %s', sdist, location)
    unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        self._scheme = urllib_parse.urlparse(index_url).scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        """POST the XML-RPC body through the pip session and parse the
        streamed reply."""
        url = urllib_parse.urlunparse(
            (self._scheme, host, handler, None, None, None))
        try:
            response = self._session.post(
                url,
                data=request_body,
                headers={'Content-Type': 'text/xml'},
                stream=True,
            )
            response.raise_for_status()
            self.verbose = verbose
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
def unpack_url(link, location, download_dir=None,
               only_download=False, session=None, hashes=None):
    """Unpack link.

    If link is a VCS link:
      if only_download, export into download_dir and ignore location
      else unpack into location
    for other types of link:
      - unpack into location
      - if download_dir, copy the file into download_dir
      - if only_download, mark location for deletion

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    if is_vcs_url(link):
        # non-editable vcs urls
        unpack_vcs_link(link, location)
    elif is_file_url(link):
        # local file or directory urls
        unpack_file_url(link, location, download_dir, hashes=hashes)
    else:
        # everything else is fetched over http(s)
        if session is None:
            session = PipSession()
        unpack_http_url(link, location, download_dir, session, hashes=hashes)
    if only_download:
        write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir, hashes):
    """Download link url into temp_dir using provided session.

    Returns (file_path, content_type).  The filename is taken from the
    Content-Disposition header when available, falling back to the link's
    filename, with an extension guessed from the content type or the final
    (possibly redirected) URL.
    """
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding We're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s", exc.response.status_code, link,
        )
        raise

    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        # Fix: the first element previously shadowed the builtin `type`;
        # only the params are needed here anyway.
        _, params = cgi.parse_header(content_disposition)
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # The server redirected us; try the final URL for an extension.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file, hashes)
    return file_path, content_type
def _check_download_dir(link, download_dir, hashes):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| {
"content_hash": "a34b88bdab37a252e84598e6a7c73e52",
"timestamp": "",
"source": "github",
"line_count": 906,
"max_line_length": 79,
"avg_line_length": 35.48896247240618,
"alnum_prop": 0.6002239293378534,
"repo_name": "Code-In-Action/python-in-action",
"id": "039e55aed191a582628db880b0b6f3ea4b30abb7",
"size": "32153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/todo-api/flask/lib/python3.6/site-packages/pip/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Python",
"bytes": "2982843"
},
{
"name": "Shell",
"bytes": "3292"
}
],
"symlink_target": ""
} |
""" This script parse and download the attachments of an email """
# coding:utf-8
import os
import poplib
import email
import time
from datetime import datetime
from email._parseaddr import parsedate_tz
import util
from Preprocess import main as predo
import const
__author__ = "Aiden"
__date__ = "2016-12-28"
__copyright__ = "Copyright 2016, wochacha"
__version__ = "1.1.2"
class Email(object):
"""This class is to parse an email and get its attachment files """
# email configuration
def __init__(self):
self.dateDir = datetime.now().strftime("%Y%m%d")
self.host = const.EMAIL_CONF['host']
self.port = const.EMAIL_CONF['port']
self.user_id = const.EMAIL_CONF['user']
self.pwd = const.EMAIL_CONF['password']
self.uidl = ''
self.mail_id = ''
self.send_date = ''
self.biz_name = ''
self.send_datetime = ''
    def main(self):
        """ parse the email and save its attachments """
        mail_server = poplib.POP3(self.host, self.port)
        try:
            mail_server.user(self.user_id)
            mail_server.pass_(self.pwd)
        except poplib.error_proto as e:
            print("Login failed:", e)
            exit(1)
        # Mailbox statistics: message count and total size in bytes.
        (mail_count, size) = mail_server.stat()
        print("MailNum: {0} Size: {1}MB".format(
            mail_count, round(size / 1024 / 1024)))
        results = util.mysql_execute("select uidl from tesla_email")
        # get local uidl list
        loc_uidl = [loc_one[0] for loc_one in results]
        # Get a unique identification of an email
        # construct a list that contains ready-to-download id
        # Each UIDL response line is b"<sequence> <uidl>"; map uidl -> seq.
        serv_uidl_dic = {serv_one.decode().split(
        )[-1]: serv_one.decode().split()[0] for serv_one in mail_server.uidl()[1]}
        # new email uidl set
        new_uidl = set(serv_uidl_dic.keys()).difference(set(loc_uidl))
        if not new_uidl:
            print("No new emails !")
            exit(0)
        # Loop email sequence
        for uidl in new_uidl:
            self.mail_id = serv_uidl_dic[uidl]
            print('Now downloading the email No.{}'.format(self.mail_id))
            self.parse_email(mail_server, uidl)
            print("=================================")
        # log out from mail server
        mail_server.quit()
def parse_email(self, mail_conn, uidl):
"""parse email content"""
messages = mail_conn.retr(self.mail_id)[1]
content = email.message_from_bytes(
'\n'.encode('utf-8').join(messages))
subject = email.header.decode_header(content.get('subject'))
mail_from = email.utils.parseaddr(content.get("from"))[1]
print("From:", mail_from)
raw_date_time = parsedate_tz(content.get('date'))
# (y, month, d, h, min, sec, _, _, _, tzoffset) = parsedate_tz(content.get('date'))
# sentDate = "%d-%02d-%02d %02d:%02d:%02d" % (y, month, d, h, min, sec)
self.send_datetime = time.strftime(
"%Y-%m-%d %H:%M:%S", raw_date_time[0:-1])
print("Date:", self.send_datetime)
self.send_date = time.strftime("%Y%m%d", raw_date_time[:-1])
sub = self.decode_str(subject[0][0])
print("Subject:", sub)
# Insert new email-info into the table: tesla_email
util.mysql_execute("""INSERT INTO tesla_email(
uidl, sequence, subject, mail_from, send_date) values(
'{}', {}, '{}', '{}', '{}') """.format(
uidl, self.mail_id, sub, mail_from, self.send_datetime))
self.download_files(content)
def download_files(self, mail):
"""download email attachemnts"""
for part in mail.walk():
if not part.is_multipart():
# Start to deal with attachments
name = part.get_param('name')
if name:
tmp_name = email.header.decode_header(name)
filename = self.decode_str(tmp_name[0][0])
print('Attachment:', filename)
# set download path
filepath = os.path.join(const.WIN_EMAIL_DIR, self.send_date)
# If not exist, create it
if not os.path.exists(filepath):
os.makedirs(filepath)
filename = self.set_filename(filepath, filename)
full_path = os.path.join(filepath, filename)
# save file
with open(full_path, 'wb') as fo:
fo.write(part.get_payload(decode=True))
md5 = util.get_file_md5(full_path)
file_size = util.format_filesize(full_path)
# insert the record into mysql table
util.mysql_execute("""INSERT IGNORE INTO importdata.tesla_file(
uidl, filename, size, send_date, md5) values (
'{}', '{}', '{}', '{}', '{}') """.format(
self.mail_id, filename, file_size, self.send_datetime, md5))
predo(full_path, 'tesla')
else:
pass
# deal with email contents
# ch = par.get_content_charset()
# if ch == None:
# print(par.get_payload(decode=True).decode())
# else:
# print(par.get_payload(decode=True).decode(ch))
@staticmethod
def set_filename(filepath, filename, n=1):
""" dddd"""
while os.path.exists(os.path.join(filepath, filename)):
filename = "{}({}).{}".format(
os.path.splitext(filename)[0], str(n), os.path.splitext(filename)[1])
n += 1
return filename
@staticmethod
def decode_str(name):
"""if byte string then decode it"""
if isinstance(name[0][0], bytes):
try:
if name[0][1] is None:
output_name = name[0][0].decode()
else:
output_name = name[0][0].decode(name[0][1])
except UnicodeDecodeError:
output_name = name[0][0].decode('gb18030')
else:
output_name = name[0][0]
return output_name
# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    OBJ_GET_EMAIL = Email()
    OBJ_GET_EMAIL.main()
    # NOTE(review): messages are never deleted from the server (no dele()
    # call here); duplicate prevention relies on the uidl table instead.
| {
"content_hash": "6df9041b0facacd638be7aab3b157119",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 92,
"avg_line_length": 39.25,
"alnum_prop": 0.5229144011185335,
"repo_name": "Macchiatto/mail-attachments-downloader",
"id": "bf06ef4f9d15e4feadaf295dc26a3cf720152398",
"size": "6437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6437"
}
],
"symlink_target": ""
} |
"""
Base class for all nodes in the scene graph. It is implemented
using the composite pattern.
Responsibilities:
- Hold the relative position to its parent.
- Blit itself on the parent.
- Dirty flag itself to trigger regeneration of surface.
"""
class Component(object):
    """A node in the scene graph (composite pattern base).

    Keeps a position relative to its parent, lazily rebuilds its surface
    when flagged dirty, and blits itself onto the parent surface.
    """

    def __init__(self):
        self._position = (0, 0)   # offset relative to the parent surface
        self._dirty = True        # forces surface creation on first draw
        self._surface = None      # cached result of surface()

    def draw(self, parent):
        """Regenerate the surface if needed, then blit onto *parent*."""
        self._recreate_surface()
        if not (self._surface and parent):
            return
        parent.blit(self._surface, self._position)

    def set_position(self, position):
        """Move this component relative to its parent."""
        self._position = position

    def surface(self):
        """Hook for subclasses: build and return this node's surface."""
        return None

    def dirty(self):
        """Request that the surface be rebuilt on the next draw."""
        self._dirty = True

    def _recreate_surface(self):
        # Lazy rebuild: only pay for surface() when something changed.
        if not self._dirty:
            return
        self._surface = self.surface()
        self._dirty = False
"""
Decorator to mark component methods that change the look
of the surface and therefor need to trigger regeneration.
"""
def recreate_surface(function):
    """Decorator marking methods that change the component's look.

    Calls self.dirty() before the wrapped method so the surface is
    regenerated on the next draw.

    Bug fix: the old wrapper accepted only positional arguments, so any
    call using keywords raised TypeError; it also discarded the wrapped
    function's name/docstring (no functools.wraps).
    """
    import functools

    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        # Flag dirty first so the next draw() rebuilds the surface.
        self.dirty()
        return function(self, *args, **kwargs)
    return wrapper
| {
"content_hash": "ea730b3111c3e2e30c7b7899fcaa8f32",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 62,
"avg_line_length": 25.045454545454547,
"alnum_prop": 0.6370235934664247,
"repo_name": "sirmar/tetris",
"id": "18f854d981f76690887454dc23d04a53ca8373a5",
"size": "1102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tetris/visibles/component.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37662"
}
],
"symlink_target": ""
} |
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('newe.sitetheme')
def initialize(context):
    """Initializer called when used as a Zope 2 product."""
    # Intentionally empty: this theme package needs no Zope 2 product
    # registration; the hook exists only to satisfy the product protocol.
| {
"content_hash": "b3c87169661072928b92fa0fa05b4e58",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.7216494845360825,
"repo_name": "a25kk/newe",
"id": "ed4477324f3bd48b980036e892a38bb02ae553fc",
"size": "218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/newe.sitetheme/newe/sitetheme/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56680"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "105462"
},
{
"name": "JavaScript",
"bytes": "58909"
},
{
"name": "Makefile",
"bytes": "1092"
},
{
"name": "Python",
"bytes": "73174"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from profiles import views
# URL routes for the profiles app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in
# 1.10; on newer Django a plain list of url()/path() entries replaces it.
urlpatterns = patterns('',
    # /<name> -> profiles.views.show_user, reversible as 'show_user'.
    url(r'^(?P<name>\w+)$', views.show_user, name='show_user'),
)
| {
"content_hash": "bd29c208f9f5788b238748a7d01b83cf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.6787878787878788,
"repo_name": "SolusOS-discontinued/RepoHub",
"id": "18312cd0269f45a420e8f3c062ce518085c88ab8",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "8239"
},
{
"name": "Python",
"bytes": "71737"
}
],
"symlink_target": ""
} |
from .node import Function, Variable, Add
from .model import Model
import numpy as np
import logging
class Individual(object):
    """Stores an individual (expression tree) in prefix notation.

    self._ind is the pre-order list of nodes; evaluation walks it
    recursively using self._pos as a cursor.
    """

    def __init__(self, ind, classifier=True, labels=None):
        self._ind = ind          # nodes in prefix (pre-order) order
        self._pos = 0            # cursor used during recursive evaluation
        self._classifier = classifier
        self._labels = labels
        self._X = None

    @property
    def individual(self):
        "The underlying list of nodes in prefix order."
        return self._ind

    def decision_function(self, X):
        "Decision function i.e. the raw data of the prediction"
        self._X = Model.convert_features(X)
        # Bug fix: reset the cursor so repeated calls re-evaluate from the
        # root; previously a second call started past the end of the list
        # and raised IndexError.
        self._pos = 0
        self._eval()
        return self._ind[0].hy

    def _eval(self):
        "Evaluates the individual using recursion and self._pos as pointer"
        pos = self._pos
        self._pos += 1
        node = self._ind[pos]
        if isinstance(node, Function):
            # Evaluate children first (prefix order), then this function.
            args = [self._eval() for x in range(node.nargs)]
            node.eval(args)
            # Free intermediate results to keep memory bounded.
            for x in args:
                x.hy = None
                x.hy_test = None
        else:
            node.eval(self._X)
        return node
class Population(object):
    """Population of a tree-based GP system."""

    def __init__(self, function_set=None, nterminals=None, seed=0):
        assert function_set is not None
        assert nterminals is not None
        self._function_set = function_set
        self._nterminals = nterminals
        self._logger = logging.getLogger('EvoDAG')
        # Seed numpy's global RNG so individual creation is reproducible.
        np.random.seed(seed)

    def random_function(self):
        """Draw a random function node from the function set."""
        choice = np.random.randint(len(self._function_set))
        cls = self._function_set[choice]
        if issubclass(cls, Add) and cls.nargs > 1:
            return cls(range(cls.nargs), weight=np.ones(cls.nargs))
        if cls.nargs == 1:
            return cls(0, weight=1)
        return cls(range(cls.nargs), weight=1)

    def random_terminal(self):
        """Draw a random input variable."""
        return Variable(np.random.randint(self._nterminals), 1)

    def create_random_ind_full(self, depth=0):
        "Random individual using full method"
        nodes = []
        self._create_random_ind_full(depth=depth, output=nodes)
        return nodes

    def _create_random_ind_full(self, depth=0, output=None):
        # Full method: terminals appear only at maximum depth.
        if depth == 0:
            output.append(self.random_terminal())
            return
        func = self.random_function()
        output.append(func)
        for _ in range(func.nargs):
            self._create_random_ind_full(depth=depth - 1, output=output)

    def grow_use_function(self, depth=0):
        "Select either function or terminal in grow method"
        if depth == 0:
            return False
        if depth == self._depth:
            return True
        return np.random.random() < 0.5

    def create_random_ind_grow(self, depth=0):
        "Random individual using grow method"
        nodes = []
        self._depth = depth
        self._create_random_ind_grow(depth=depth, output=nodes)
        return nodes

    def _create_random_ind_grow(self, depth=0, output=None):
        if not self.grow_use_function(depth=depth):
            output.append(self.random_terminal())
            return
        func = self.random_function()
        output.append(func)
        for _ in range(func.nargs):
            self._create_random_ind_grow(depth=depth - 1, output=output)

    def create_population(self, popsize=1000, min_depth=2,
                          max_depth=4,
                          X=None):
        "Creates random population using ramped half-and-half method"
        import itertools
        # Cycle over every (depth, full?) combination: ramped half-and-half.
        schedule = [combo for combo in
                    itertools.product(range(min_depth, max_depth + 1),
                                      [True, False])]
        cursor = 0
        population = []
        while len(population) < popsize:
            depth, use_full = schedule[cursor]
            cursor += 1
            if cursor >= len(schedule):
                cursor = 0
            if use_full:
                candidate = self.create_random_ind_full(depth=depth)
            else:
                candidate = self.create_random_ind_grow(depth=depth)
            keep = True
            if X is not None:
                # Discard candidates whose output is not finite on X.
                wrapped = Individual(candidate)
                wrapped.decision_function(X)
                keep = wrapped.individual[0].isfinite()
            l_vars = (keep, len(population), use_full, depth, len(candidate))
            l_str = " flag: %s len(output): %s full: %s depth: %s len(ind): %s"
            self._logger.debug(l_str % l_vars)
            if keep:
                population.append(candidate)
        return population
| {
"content_hash": "0551a800b7cbf75d66690df5e246e2c3",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 33.722627737226276,
"alnum_prop": 0.5445887445887446,
"repo_name": "mgraffg/EvoDAG",
"id": "e63db38bb913fde125407a2bd7617111e3e1e30c",
"size": "5203",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "EvoDAG/gp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "34555"
},
{
"name": "Python",
"bytes": "314678"
}
],
"symlink_target": ""
} |
from channels.generic.websocket import JsonWebsocketConsumer
from ..models import Post
class LiveblogConsumer(JsonWebsocketConsumer):
    """Websocket consumer that subscribes clients to a post's liveblog group."""

    def _get_post(self, kwargs):
        """Resolve the Post addressed by the URL kwargs, or None."""
        namespace = kwargs.get("apphook")
        language = kwargs.get("lang")
        slug = kwargs.get("post")
        try:
            return (
                Post.objects.namespace(namespace)
                .language(language)
                .active_translations(slug=slug)
                .get()
            )
        except Post.DoesNotExist:
            return

    def websocket_connect(self, message):
        # Resolve group membership before the base class accepts the socket.
        self.groups = self.get_groups()
        return super().websocket_connect(message)

    def get_groups(self):
        """
        Connect users to the group of the post according to the URL parameters
        """
        post = self._get_post(self.scope["url_route"]["kwargs"])
        return [post.liveblog_group] if post else []
| {
"content_hash": "7f8cf876daa07e2cf1971c5026a8ef43",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 102,
"avg_line_length": 31.035714285714285,
"alnum_prop": 0.61795166858458,
"repo_name": "nephila/djangocms-blog",
"id": "27d2bd89f0aa5cfc0c7d5e5c2a741954a9816935",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "djangocms_blog/liveblog/consumers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "16041"
},
{
"name": "JavaScript",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "389126"
}
],
"symlink_target": ""
} |
from .logging_handler import LoggingHandler
class Pin(LoggingHandler):
    """Represents a single pin on a hardware header."""

    def __init__(self, header, pinNumber):
        super(Pin, self).__init__()
        # Header (connector) this pin belongs to.
        self._header = header
        # Pin position on the header; coerced to int so callers may pass
        # a numeric string.
        self._pin_number = int(pinNumber)

    @property
    def pin_number(self):
        """The pin's integer position on its header."""
        return self._pin_number
| {
"content_hash": "925587dc4f2ca081f4e2236b81c4d91d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 21.357142857142858,
"alnum_prop": 0.6220735785953178,
"repo_name": "omni-resources/fof-motor-controller",
"id": "63a1f9c63d539ec343e0614d6e715c4a971d9a4d",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "motor_controller/pin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19888"
}
],
"symlink_target": ""
} |
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from models import MenuPlugin
class Menu(CMSPluginBase):
    """CMS plugin rendering a single configurable menu entry."""

    model = MenuPlugin
    name = _("Menu")
    render_template = "menu.html"

    def render(self, context, instance, placeholder):
        """Populate the template context for one menu entry."""
        # Resolve the link target: explicit URL wins, then the linked page.
        if instance.url:
            href = instance.url
        elif instance.page:
            href = instance.page.get_absolute_url()
        else:
            href = ""
        user = context.get('user')
        # Visibility: 'B' = everybody, 'A' = authenticated only,
        # 'U' = anonymous only.  is_authenticated() stays lazily evaluated,
        # exactly as before, so a missing user only matters for 'A'/'U'.
        visible = (instance.condition == 'B'
                   or (instance.condition == 'A'
                       and user.is_authenticated())
                   or (instance.condition == 'U'
                       and not user.is_authenticated()))
        context.update({
            'show': visible,
            'title': instance.title,
            'link': href,
            'class': instance.css_class,
            'target': instance.target,
            'placeholder': placeholder,
            'object': instance
        })
        return context
plugin_pool.register_plugin(Menu) | {
"content_hash": "3852cbc545d8b5a2b144ee8b0f78819b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 32.121212121212125,
"alnum_prop": 0.5858490566037736,
"repo_name": "sophilabs/pyconuy-site",
"id": "a15ef14f384bf443ac5781c1af1d167dcaad27b9",
"size": "1060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyconuy2012/main/cms_plugins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15258"
},
{
"name": "HTML",
"bytes": "52967"
},
{
"name": "Python",
"bytes": "31276"
}
],
"symlink_target": ""
} |
import datetime
from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
# Global oslo.config handle; register compute_driver so self.flags() in
# the tests below can override it.
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class EC2ValidateTestCase(test.TestCase):
    """EC2 API validation tests: malformed or unknown instance/volume IDs
    must raise the appropriate exception from every API entry point."""

    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            # No-op stand-in used to silence usage notifications.
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)
        # set up our cloud
        self.cloud = cloud.CloudController()
        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduter = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        # Map each probe ID to the exception its lookup should raise:
        # malformed IDs fail validation, well-formed ones miss the lookup.
        self.ec2_id_exception_map = [(x, exception.InvalidInstanceIDMalformed)
                                     for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                          for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                                         exception.InvalidInstanceIDMalformed)
                                        for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                             for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id):
            # Minimal image record satisfying the EC2 layer's expectations.
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')

    def tearDown(self):
        super(EC2ValidateTestCase, self).tearDown()
        fake.FakeImageService_reset()

    # EC2_API tests (InvalidInstanceID.Malformed)
    def test_console_output(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.get_console_output,
                              context=self.context,
                              instance_id=[ec2_id])

    def test_describe_instance_attribute(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.describe_instance_attribute,
                              context=self.context,
                              instance_id=ec2_id,
                              attribute='kernel')

    def test_instance_lifecycle(self):
        # Every lifecycle operation must validate IDs the same way.
        lifecycle = [self.cloud.terminate_instances,
                     self.cloud.reboot_instances,
                     self.cloud.stop_instances,
                     self.cloud.start_instances,
                     ]
        for cmd in lifecycle:
            for ec2_id, e in self.ec2_id_exception_map:
                self.assertRaises(e,
                                  cmd,
                                  context=self.context,
                                  instance_id=[ec2_id])

    def test_create_image(self):
        for ec2_id, e in self.ec2_id_exception_map:
            self.assertRaises(e,
                              self.cloud.create_image,
                              context=self.context,
                              instance_id=ec2_id)

    def test_create_snapshot(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.create_snapshot,
                              context=self.context,
                              volume_id=ec2_id)

    def test_describe_volumes(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.describe_volumes,
                              context=self.context,
                              volume_id=[ec2_id])

    def test_delete_volume(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.delete_volume,
                              context=self.context,
                              volume_id=ec2_id)

    def test_detach_volume(self):
        for ec2_id, e in self.volume_id_exception_map:
            self.assertRaises(e,
                              self.cloud.detach_volume,
                              context=self.context,
                              volume_id=ec2_id)
class EC2TimestampValidationTestCase(test.TestCase):
    """Test case for EC2 request timestamp validation."""

    def test_validate_ec2_timestamp_valid(self):
        # A well-formed timestamp with no expiry window is never expired.
        request = {'Timestamp': '2011-04-22T11:29:49Z'}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_timestamp_old_format(self):
        # Missing trailing 'Z' is treated as expired (invalid format).
        request = {'Timestamp': '2011-04-22T11:29:49'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_timestamp_not_set(self):
        self.assertFalse(ec2utils.is_ec2_timestamp_expired({}))

    def test_validate_ec2_timestamp_ms_time_regex(self):
        # 3- and 6-digit fractional seconds match; 7 digits or no 'Z' do not.
        matching = ('2011-04-22T11:29:49.123Z',
                    '2011-04-22T11:29:49.123456Z')
        for stamp in matching:
            self.assertIsNotNone(ec2utils._ms_time_regex.match(stamp))
        non_matching = ('2011-04-22T11:29:49.1234567Z',
                        '2011-04-22T11:29:49.123',
                        '2011-04-22T11:29:49Z')
        for stamp in non_matching:
            self.assertIsNone(ec2utils._ms_time_regex.match(stamp))

    def test_validate_ec2_timestamp_aws_sdk_format(self):
        request = {'Timestamp': '2011-04-22T11:29:49.123Z'}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    def test_validate_ec2_timestamp_invalid_format(self):
        request = {'Timestamp': '2011-04-22T11:29:49.000P'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_timestamp_advanced_time(self):
        # EC2 request with Timestamp slightly in the future but inside
        # the expiry window.
        future = timeutils.utcnow() + datetime.timedelta(seconds=250)
        request = {'Timestamp': timeutils.strtime(future,
                                                  "%Y-%m-%dT%H:%M:%SZ")}
        self.assertFalse(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    def test_validate_ec2_timestamp_advanced_time_expired(self):
        too_far = timeutils.utcnow() + datetime.timedelta(seconds=350)
        request = {'Timestamp': timeutils.strtime(too_far,
                                                  "%Y-%m-%dT%H:%M:%SZ")}
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    def test_validate_ec2_req_timestamp_not_expired(self):
        request = {'Timestamp': timeutils.isotime()}
        self.assertFalse(
            ec2utils.is_ec2_timestamp_expired(request, expires=15))

    def test_validate_ec2_req_timestamp_expired(self):
        request = {'Timestamp': '2011-04-22T12:00:00Z'}
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    def test_validate_ec2_req_expired(self):
        # An Expires value of "now" counts as already expired.
        request = {'Expires': timeutils.isotime()}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_req_not_expired(self):
        deadline = timeutils.utcnow() + datetime.timedelta(seconds=350)
        request = {'Expires': timeutils.strtime(deadline,
                                                "%Y-%m-%dT%H:%M:%SZ")}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_Expires_timestamp_invalid_format(self):
        # EC2 request with an invalid Expires value.
        request = {'Expires': '2011-04-22T11:29:49'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_req_timestamp_Expires(self):
        # Supplying both Timestamp and Expires is an invalid request.
        request = {'Timestamp': '2011-04-22T11:29:49Z',
                   'Expires': timeutils.isotime()}
        self.assertRaises(exception.InvalidRequest,
                          ec2utils.is_ec2_timestamp_expired,
                          request)
| {
"content_hash": "25b26a160c03e94edf7356f9cf5a9a1f",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 78,
"avg_line_length": 40.79761904761905,
"alnum_prop": 0.5817527477871802,
"repo_name": "qwefi/nova",
"id": "3a8d8b5a9dceb4749c0f0e2105a37dafe7465503",
"size": "11005",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/api/ec2/test_ec2_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11596912"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
} |
import clr
import System
from System.Collections.Generic import *
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument

# Dynamo node inputs -- NOTE(review): meanings inferred from usage below:
# IN[0]: elements whose bounding boxes define the search volumes
# IN[1]: view used to compute bounding boxes and scope the collector
# IN[2]: tolerance for BoundingBoxIntersectsFilter
# IN[3]: categories whose elements are kept in the result
cutters = UnwrapElement(IN[0])
view = UnwrapElement(IN[1])
tol = IN[2]
filtercats = List[ElementId]([x.Id for x in IN[3]])
catfilter = ElementMulticategoryFilter(filtercats)
intersectorlist = list()
for cutter in cutters:
    # Keep output aligned with input: empty list for missing cutters.
    if cutter is None: intersectorlist.append([])
    else:
        bbox = cutter.get_BoundingBox(view)
        bboxfilter = BoundingBoxIntersectsFilter(Outline(bbox.Min,bbox.Max),tol)
        collector = FilteredElementCollector(doc, view.Id)
        # Exclude the cutter itself from its own intersection results.
        excludelist = []
        excludelist.append(cutter.Id)
        excludeIDs = List[ElementId](excludelist)
        excfilter = ExclusionFilter(excludeIDs)
        intersectorlist.append(collector.WherePasses(bboxfilter).WherePasses(excfilter).WherePasses(catfilter).ToElements())
OUT = intersectorlist | {
"content_hash": "d19ed6959dfceb005e1885c49031c6cd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 118,
"avg_line_length": 30,
"alnum_prop": 0.7991228070175439,
"repo_name": "CAAD-RWTH/ClockworkForDynamo",
"id": "4895b09b06bee5399fee7c7f9c6fda40bde37855",
"size": "1140",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nodes/2.x/python/All Intersecting Elements Of Category By BoundingBox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316146"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class SugarConfig(AppConfig):
    """Django application configuration for the ``sugar`` app."""
    name = 'sugar'
| {
"content_hash": "b87f35091aedcf133e7add625cf81e59",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17,
"alnum_prop": 0.7411764705882353,
"repo_name": "kensonman/mansonsolutions.sugar",
"id": "6cec5f6dbb2917d8b4a432580693c4da7c69c84c",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sugar/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11002"
},
{
"name": "Nginx",
"bytes": "1318"
},
{
"name": "Python",
"bytes": "15726"
}
],
"symlink_target": ""
} |
import socket
import subprocess
# get hostname
hostname = socket.gethostname()

# get the machine's IP addresses via the shell; check_output returns bytes
# on Python 3, so decode before doing any string work
ip = subprocess.check_output(['hostname', '-I']).decode()

# the first whitespace-separated token is the primary IPv4 address;
# split() also copes with output lacking a trailing space, which made the
# old ip[:ip.index(" ")] approach raise ValueError
ip4 = ip.split()[0]

# show it -- bug fix: the old calls passed the value as a second print
# argument instead of %-formatting it into the message, printing a
# literal "%s" followed by the value
print("Running on host %s." % hostname)
print("IP4 address is %s." % ip4)
| {
"content_hash": "9c37c0648e65ba80d1914890a87104be",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 48,
"avg_line_length": 20.4,
"alnum_prop": 0.6928104575163399,
"repo_name": "markusk/minibot",
"id": "0bb5d00feb2893982ad46fc21e2fdecabb40ddab",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/hostinfo.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "804657"
},
{
"name": "C++",
"bytes": "8897"
},
{
"name": "CMake",
"bytes": "13960"
},
{
"name": "Gnuplot",
"bytes": "108022"
},
{
"name": "Python",
"bytes": "106383"
},
{
"name": "Shell",
"bytes": "7815"
}
],
"symlink_target": ""
} |
"""
Revision ID: 147da617670
Revises: None
Create Date: 2015-12-08 23:17:08.754842
"""
# revision identifiers, used by Alembic.
revision = '147da617670'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``message`` table (id PK, text up to 1000 chars)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('message',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=1000), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``message`` table, reverting this migration."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('message')
    ### end Alembic commands ###
| {
"content_hash": "80891b410e637623cd43d4167a91054a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 63,
"avg_line_length": 22.266666666666666,
"alnum_prop": 0.6676646706586826,
"repo_name": "imbstack/pasquino",
"id": "7421397f3942ab91f49a46f51cb48c4591285f15",
"size": "668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "migrations/versions/147da617670.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "137543"
},
{
"name": "HTML",
"bytes": "2516"
},
{
"name": "JavaScript",
"bytes": "1451"
},
{
"name": "Makefile",
"bytes": "268"
},
{
"name": "Mako",
"bytes": "402"
},
{
"name": "Python",
"bytes": "5128"
}
],
"symlink_target": ""
} |
import getopt
import os
import re
import sys
from subprocess import Popen, PIPE, call
def usage():
    # Print the command usage to stderr and exit with status 1.
    sys.stderr.write(
        'Usage: cstyle [-w] [rev|rev1..rev2]\n'
        '\n'
        'By default, checks working tree against HEAD, or checks changes in\n'
        'HEAD if the working tree is clean. With a revision option, checks\n'
        'changes in rev or the series rev1..rev2. With the -w option,\n'
        'checks working tree against rev (defaults to HEAD).\n')
    sys.exit(1)
# Run a command and return a list of its output lines.
def run(args):
    # subprocess.check_output would be ideal here, but requires Python 2.7.
    proc = Popen(args, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        return out.splitlines()
    # Report the failing command (and any stderr it produced), then bail.
    sys.stderr.write('Failed command: ' + ' '.join(args) + '\n')
    if err != '':
        sys.stderr.write('stderr:\n' + err)
    sys.stderr.write('Unexpected command failure, exiting\n')
    sys.exit(1)
# Find the top level of the git working tree, or None if we're not in
# one.
def find_toplevel():
    # git doesn't seem to have a way to do this, so we search by hand:
    # walk upward from the cwd until a .git entry appears.
    current = os.getcwd()
    while not os.path.exists(os.path.join(current, '.git')):
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding a repository.
            return None
        current = parent
    return current
# Check for style issues in a file within rev (or within the current
# checkout if rev is None). Report only problems on line numbers in
# new_lines.
# Matches cstyle-file.py output: "<lineno> <complaint>".
line_re = re.compile(r'^\s*(\d+) (.*)$')
def check_file(filename, rev, new_lines):
    # Process only C source files under src.
    root, ext = os.path.splitext(filename)
    if not filename.startswith('src/') or ext not in ('.c', '.h', '.hin'):
        return
    # Strip the leading "src/" for display.
    dispname = filename[4:]
    # Pipe the file's contents (working tree via cat, or the version in
    # rev via git show) into the per-file style checker.
    if rev is None:
        p1 = Popen(['cat', filename], stdout=PIPE)
    else:
        p1 = Popen(['git', 'show', rev + ':' + filename], stdout=PIPE)
    p2 = Popen(['python', 'src/util/cstyle-file.py'], stdin=p1.stdout,
               stdout=PIPE)
    p1.stdout.close()
    out, err = p2.communicate()
    if p2.returncode != 0:
        sys.exit(1)
    # Print only complaints on lines the diff actually touched, with a
    # one-time per-file header.
    first = True
    for line in out.splitlines():
        m = line_re.match(line)
        if int(m.group(1)) in new_lines:
            if first:
                print ' ' + dispname + ':'
                first = False
            print ' ' + line
# Determine the lines of each file modified by diff (a sequence of
# strings) and check for style violations in those lines. rev
# indicates the version in which the new contents of each file can be
# found, or is None if the current contents are in the working copy.
# Matches "@@ -old[,count] +new[,count] @@" chunk headers.
chunk_header_re = re.compile(r'^@@ -\d+(,(\d+))? \+(\d+)(,(\d+))? @@')
def check_diff(diff, rev):
    # State machine over unified-diff lines: counts of old/new lines left
    # in the current chunk, the current new-side line number, the file
    # being processed, and the new-side line numbers collected for it.
    old_count, new_count, lineno = 0, 0, 0
    filename = None
    for line in diff:
        if not line or line.startswith('\\ No newline'):
            continue
        if old_count > 0 or new_count > 0:
            # We're in a chunk.
            if line[0] == '+':
                new_lines.append(lineno)
            if line[0] in ('+', ' '):
                new_count = new_count - 1
                lineno = lineno + 1
            if line[0] in ('-', ' '):
                old_count = old_count - 1
        elif line.startswith('+++ b/'):
            # We're starting a new file. Check the last one.
            if filename:
                check_file(filename, rev, new_lines)
            filename = line[6:]
            new_lines = []
        else:
            m = chunk_header_re.match(line)
            if m:
                # Counts default to 1 when the header omits them.
                old_count = int(m.group(2) or '1')
                lineno = int(m.group(3))
                new_count = int(m.group(5) or '1')
    # Check the last file in the diff.
    if filename:
        check_file(filename, rev, new_lines)
# Check a sequence of revisions for style issues.
def check_series(revlist):
    for revision in revlist:
        # Flush so the one-line summary interleaves correctly with our
        # own output.
        sys.stdout.flush()
        call(['git', 'show', '-s', '--oneline', revision])
        diff_lines = run(['git', 'diff-tree', '--no-commit-id', '--root',
                          '-M', '--cc', revision])
        check_diff(diff_lines, revision)
# Parse arguments.
try:
    opts, args = getopt.getopt(sys.argv[1:], 'w')
except getopt.GetoptError as err:
    # 'as err' is valid on Python 2.6+ and Python 3; the old comma form
    # was Python-2-only.
    print(str(err))
    usage()
if len(args) > 1:
    usage()
# Change to the top level of the working tree so we easily run the file
# checker and refer to working tree files.
toplevel = find_toplevel()
if toplevel is None:
    # Bug fix: the message had an unfilled %s placeholder, and execution
    # previously fell through to os.chdir(None) after reporting the error.
    sys.stderr.write('%s must be run within a git working tree\n' % sys.argv[0])
    sys.exit(1)
os.chdir(toplevel)
if ('-w', '') in opts:
    # Check the working tree against a base revision.
    arg = 'HEAD'
    if args:
        arg = args[0]
    check_diff(run(['git', 'diff', arg]), None)
elif args:
    # Check the differences in a rev or a series of revs.
    if '..' in args[0]:
        check_series(run(['git', 'rev-list', '--reverse', args[0]]))
    else:
        check_series([args[0]])
else:
    # No options or arguments. Check the differences against HEAD, or
    # the differences in HEAD if the working tree is clean.
    diff = run(['git', 'diff', 'HEAD'])
    if diff:
        check_diff(diff, None)
    else:
        check_series(['HEAD'])
| {
"content_hash": "1e5015f1193f9f7b4e0bc20611edc8de",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 78,
"avg_line_length": 32.67283950617284,
"alnum_prop": 0.5664084640090686,
"repo_name": "gerritjvv/cryptoplayground",
"id": "7c45335b09df28967d2244a32d3f176d8def445c",
"size": "6668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kerberos/kdc/src/krb5-1.16/src/util/cstyle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "40918"
},
{
"name": "Awk",
"bytes": "10967"
},
{
"name": "Batchfile",
"bytes": "2734"
},
{
"name": "C",
"bytes": "14209534"
},
{
"name": "C++",
"bytes": "822611"
},
{
"name": "CMake",
"bytes": "486373"
},
{
"name": "CSS",
"bytes": "72712"
},
{
"name": "Emacs Lisp",
"bytes": "6797"
},
{
"name": "HTML",
"bytes": "10177760"
},
{
"name": "Java",
"bytes": "88477"
},
{
"name": "JavaScript",
"bytes": "91201"
},
{
"name": "Lex",
"bytes": "1395"
},
{
"name": "M4",
"bytes": "25420"
},
{
"name": "Makefile",
"bytes": "4976551"
},
{
"name": "NSIS",
"bytes": "94536"
},
{
"name": "Perl",
"bytes": "138102"
},
{
"name": "Perl 6",
"bytes": "7955"
},
{
"name": "Python",
"bytes": "493201"
},
{
"name": "RPC",
"bytes": "5974"
},
{
"name": "Roff",
"bytes": "340434"
},
{
"name": "Shell",
"bytes": "103572"
},
{
"name": "TeX",
"bytes": "2040204"
},
{
"name": "Yacc",
"bytes": "34752"
},
{
"name": "sed",
"bytes": "613"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
import time
import numpy as np
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from pyqtgraph.dockarea import *
class GUI:
    """Simple pyqtgraph window holding a vertical stack of plots.

    Attributes:
        plot: list of pg.PlotWidget, one per added plot.
        curve: list of curve lists; curve[i] holds the curves of plot[i].
    """

    def __init__(self, width=800, height=450, title=''):
        # Bug fix: plot/curve were class attributes, so every GUI instance
        # shared the same lists. Make them per-instance.
        self.plot = []
        self.curve = []
        # Create GUI window
        self.app = QtGui.QApplication([])
        self.win = pg.GraphicsWindow(title)
        self.win.resize(width, height)
        self.win.setWindowTitle(title)
        # Create GUI layout
        self.layout = QtGui.QVBoxLayout()
        self.win.setLayout(self.layout)

    def add_plot(self, title):
        """Append a new empty plot widget to the window.

        NOTE(review): 'title' is currently unused; displaying it would need
        new_plot.setTitle(title) -- confirm intent before wiring it up.
        """
        new_plot = pg.PlotWidget()
        self.layout.addWidget(new_plot)
        self.plot.append(new_plot)
        self.curve.append([])

    def add_curve(self, plot_index, pen=(255, 255, 255)):
        """Add a curve drawn with 'pen' to the plot at 'plot_index'."""
        self.curve[plot_index].append(self.plot[plot_index].plot(pen=pen))
if __name__ == '__main__':
    # Example test gui
    N = 48
    gui = GUI(title='Test')
    # Sin plot
    gui.add_plot(title='Sin Plot')
    gui.add_curve(plot_index=0)
    # NOTE(review): nextRow() drives GraphicsWindow's internal grid layout,
    # but plots here are added to a QVBoxLayout -- presumably a no-op; verify.
    gui.win.nextRow()
    # Cos plot
    gui.add_plot(title='Cos Plot')
    gui.add_curve(plot_index=1)
    # Animate both curves at roughly 30 FPS until the process is killed.
    while True:
        t = time.time()
        x = np.linspace(t, 2 * np.pi + t, N)
        gui.curve[0][0].setData(x=x, y=np.sin(x))
        gui.curve[1][0].setData(x=x, y=np.cos(x))
        gui.app.processEvents()
        time.sleep(1.0 / 30.0)
| {
"content_hash": "7d000f8c994bcbf1d6ad24300e780d3d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 74,
"avg_line_length": 27.862745098039216,
"alnum_prop": 0.5967628430682618,
"repo_name": "joeybab3/audio-reactive-led-strip",
"id": "869d5c09ad35c2345e2bdda3b5d6dcc107082b73",
"size": "1421",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2934"
},
{
"name": "Python",
"bytes": "31713"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from typing import Any, Awaitable
from msrest import Deserializer, Serializer
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from .. import models
from ._configuration import SIPRoutingServiceConfiguration
from .operations import SIPRoutingServiceOperationsMixin
class SIPRoutingService(SIPRoutingServiceOperationsMixin):
    """SipRouting Service.

    :param endpoint: The communication resource, for example
     https://resourcename.communication.azure.com.
    :type endpoint: str
    :keyword api_version: Api Version. Default value is "2021-05-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        endpoint: str,
        **kwargs: Any
    ) -> None:
        # Wire configuration and the async pipeline client together.
        self._config = SIPRoutingServiceConfiguration(endpoint=endpoint, **kwargs)
        self._client = AsyncPipelineClient(base_url='{endpoint}', config=self._config, **kwargs)

        # Collect every model class for (de)serialization lookups.
        model_types = {name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)}
        self._serialize = Serializer(model_types)
        self._deserialize = Deserializer(model_types)
        self._serialize.client_side_validation = False

    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Work on a copy so the caller's request object is never mutated.
        copied_request = deepcopy(request)
        format_args = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        copied_request.url = self._client.format_url(copied_request.url, **format_args)
        return self._client.send_request(copied_request, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "SIPRoutingService":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| {
"content_hash": "699745fa7d17c754d175b8e144076534",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 116,
"avg_line_length": 38.27272727272727,
"alnum_prop": 0.67119104173736,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2f2b9c446a36370a58e86f7eac55e8e70cf4a4c0",
"size": "3415",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/siprouting/_generated/aio/_sip_routing_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Uses lsof to collect data on number of open files per user per type
#### Config Options
Check Options table below
*** Priority Explanation ***
This is an explanation of the priority in which users, groups, and uid, are
evaluated. EXCLUDE ALWAYS OVERRULES INCLUDE within the same level (ie within
users or group)
* user_include/exclude (top level/priority)
* group_include/exclude (second level: if user not in user_include/exclude,
groups takes affect)
* uid_min/max (third level: if user not met above qualifications, uids
take affect)
* type_include - This is a list of file types to collect ('REG', 'DIR', 'FIFO'
, etc). If left empty, will collect for all file types. (Note: it suggested
to not leave type_include empty, as it would add significant load to your
graphite box(es) (default = None)
* type_exclude - This is a list of file types to exclude from being collected
for. If left empty, no file types will be excluded. (default = None)
* collect_user_data - This enables or disables the collection of user specific
file handles. (default = False)
#### Dependencies
* /proc/sys/fs/file-nr
* /usr/sbin/lsof
"""
import diamond.collector
import re
import os
# Parses /proc/sys/fs/file-nr: "<allocated> <unused> <max>".
_RE = re.compile(r'(\d+)\s+(\d+)\s+(\d+)')
class FilestatCollector(diamond.collector.Collector):
    """Collects system-wide open-file counts and, optionally, per-user
    per-file-type open-file counts gathered via lsof."""

    PROC = '/proc/sys/fs/file-nr'

    def get_default_config_help(self):
        """Return help text for each config option."""
        config_help = super(FilestatCollector, self).get_default_config_help()
        config_help.update({
            'user_include': "This is list of users to collect data for."
                            " If this is left empty, its a wildcard"
                            " to collector for all users"
                            " (default = None)",
            'user_exclude': "This is a list of users to exclude"
                            " from collecting data. If this is left empty,"
                            " no specific users will be excluded"
                            " (default = None)",
            'group_include': "This is a list of groups to include"
                             " in data collection. This DOES NOT"
                             " override user_exclude."
                             " (default = None)",
            'group_exclude': "This is a list of groups to exclude"
                             " from collecting data. It DOES NOT override"
                             " user_include. (default = None)",
            'uid_min': "This creates a floor for the user's uid."
                       " This means that it WILL NOT collect data"
                       " for any user with a uid LOWER"
                       " than the specified minimum,"
                       " unless the user is told to be included"
                       " by user_include (default = 0)",
            'uid_max': "This creates a ceiling for the user's uid."
                       " This means that it WILL NOT collect data"
                       " for any user with a uid HIGHER"
                       " than the specified maximum,"
                       " unless the user is told to be included"
                       " by user_include (default = 65536)",
            'type_include': "This is a list of file types to collect"
                            " ('REG', 'DIR', 'FIFO', etc). If left empty,"
                            " will collect for all file types."
                            "(Note: it's suggested to not leave"
                            " type_include empty,"
                            " as it would add significant load"
                            " to your graphite box(es) (default = None)",
            'type_exclude': "This is a list of file types to exclude"
                            " from being collected for. If left empty,"
                            " no file types will be excluded. (default = None)",
            'collect_user_data': "This enables or disables"
                                 " the collection of user specific"
                                 " file handles. (default = False)"
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(FilestatCollector, self).get_default_config()
        config.update({
            'path': 'files',
            'user_include': None,
            'user_exclude': None,
            'group_include': None,
            'group_exclude': None,
            'uid_min': 0,
            'uid_max': 65536,
            'type_include': None,
            'type_exclude': None,
            'collect_user_data': False
        })
        return config

    def get_userlist(self):
        """
        This collects all the users with open files on the system, and filters
        based on the variables user_include and user_exclude
        """
        # convert user/group lists to arrays if strings
        if isinstance(self.config['user_include'], basestring):
            self.config['user_include'] = self.config['user_include'].split()
        if isinstance(self.config['user_exclude'], basestring):
            self.config['user_exclude'] = self.config['user_exclude'].split()
        if isinstance(self.config['group_include'], basestring):
            self.config['group_include'] = self.config['group_include'].split()
        if isinstance(self.config['group_exclude'], basestring):
            self.config['group_exclude'] = self.config['group_exclude'].split()

        # Every distinct user that currently has open files.
        rawusers = os.popen("lsof | awk '{ print $3 }' | sort | uniq -d"
                            ).read().split()

        userlist = []
        # remove any not on the user include list
        if ((self.config['user_include'] is None or
             len(self.config['user_include']) == 0)):
            userlist = rawusers
        else:
            # only work with specified include list, which is added at the end
            userlist = []

        # add any user in the group include list
        addedByGroup = []
        if ((self.config['group_include'] is not None and
             len(self.config['group_include']) > 0)):
            for u in rawusers:
                # NOTE(review): info-level log of every raw user looks like
                # leftover debugging -- consider removing or demoting.
                self.log.info(u)
                # get list of groups of user
                user_groups = os.popen("id -Gn %s" % (u)).read().split()
                for gi in self.config['group_include']:
                    if gi in user_groups and u not in userlist:
                        userlist.append(u)
                        addedByGroup.append(u)
                        break

        # remove any user in the exclude group list
        if ((self.config['group_exclude'] is not None and
             len(self.config['group_exclude']) > 0)):
            # create tmp list to iterate over while editing userlist
            tmplist = userlist[:]
            for u in tmplist:
                # get list of groups of user
                groups = os.popen("id -Gn %s" % (u)).read().split()
                for gi in self.config['group_exclude']:
                    if gi in groups:
                        userlist.remove(u)
                        break

        # remove any that aren't within the uid limits
        # make sure uid_min/max are ints
        self.config['uid_min'] = int(self.config['uid_min'])
        self.config['uid_max'] = int(self.config['uid_max'])
        tmplist = userlist[:]
        for u in tmplist:
            if ((self.config['user_include'] is None or
                 u not in self.config['user_include'])):
                if u not in addedByGroup:
                    uid = int(os.popen("id -u %s" % (u)).read())
                    if ((uid < self.config['uid_min'] and
                         self.config['uid_min'] is not None and
                         u in userlist)):
                        userlist.remove(u)
                    if ((uid > self.config['uid_max'] and
                         self.config['uid_max'] is not None and
                         u in userlist)):
                        userlist.remove(u)

        # add users that are in the users include list
        if ((self.config['user_include'] is not None and
             len(self.config['user_include']) > 0)):
            for u in self.config['user_include']:
                if u in rawusers and u not in userlist:
                    userlist.append(u)

        # remove any that is on the user exclude list
        if ((self.config['user_exclude'] is not None and
             len(self.config['user_exclude']) > 0)):
            for u in self.config['user_exclude']:
                if u in userlist:
                    userlist.remove(u)

        return userlist

    def get_typelist(self):
        """
        This collects all avaliable types and applies include/exclude filters
        """
        typelist = []
        # convert type list into arrays if strings
        if isinstance(self.config['type_include'], basestring):
            self.config['type_include'] = self.config['type_include'].split()
        if isinstance(self.config['type_exclude'], basestring):
            self.config['type_exclude'] = self.config['type_exclude'].split()

        # remove any not in include list
        if self.config['type_include'] is None or len(
                self.config['type_include']) == 0:
            typelist = os.popen("lsof | awk '{ print $5 }' | sort | uniq -d"
                                ).read().split()
        else:
            typelist = self.config['type_include']

        # remove any in the exclude list
        # Bug fix: the length check previously (and wrongly) inspected
        # type_include instead of type_exclude.
        if self.config['type_exclude'] is not None and len(
                self.config['type_exclude']) > 0:
            for t in self.config['type_exclude']:
                if t in typelist:
                    typelist.remove(t)

        return typelist

    def process_lsof(self, users, types):
        """
        Get the list of users and file types to collect for and collect the
        data from lsof
        """
        d = {}
        for u in users:
            d[u] = {}
            # NOTE(review): interpolating the username into a shell command;
            # usernames come from lsof output, but subprocess with a list
            # argument would be safer than os.popen here.
            tmp = os.popen("lsof -bu %s | awk '{ print $5 }'" % (
                u)).read().split()
            for t in types:
                d[u][t] = tmp.count(t)
        return d

    def collect(self):
        """Publish total file-handle stats and optional per-user counts."""
        if not os.access(self.PROC, os.R_OK):
            return None

        # collect total open files
        file = open(self.PROC)
        for line in file:
            match = _RE.match(line)
            if match:
                self.publish('assigned', int(match.group(1)))
                self.publish('unused', int(match.group(2)))
                self.publish('max', int(match.group(3)))
        file.close()

        # collect open files per user per type
        if self.config['collect_user_data']:
            data = self.process_lsof(self.get_userlist(), self.get_typelist())
            for ukey in data.iterkeys():
                for tkey in data[ukey].iterkeys():
                    self.log.debug('files.user.%s.%s %s' % (
                        ukey, tkey, int(data[ukey][tkey])))
                    self.publish('user.%s.%s' % (ukey, tkey),
                                 int(data[ukey][tkey]))
| {
"content_hash": "005aeed0d4daecc7f274e0d78c297cd6",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 80,
"avg_line_length": 42.21132075471698,
"alnum_prop": 0.5180582871446451,
"repo_name": "zoidbergwill/Diamond",
"id": "a7016f9b66583d77aee81229cae2d4d3daad5e90",
"size": "11202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/collectors/filestat/filestat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "17806"
},
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4359"
},
{
"name": "Python",
"bytes": "1360761"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "12795"
}
],
"symlink_target": ""
} |
from django.contrib.syndication.feeds import Feed, FeedDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse

from basic.blog.models import Settings
from django_proxy.models import Proxy
from tagging.models import Tag, TaggedItem
class AllEntries(Feed):
    """Syndication feed of the ten most recently published entries."""

    _settings = None

    @property
    def settings(self):
        # Fetch the blog Settings singleton lazily and cache it.
        if self._settings is None:
            self._settings = Settings.get_current()
        return self._settings

    def title(self):
        return '%s all entries feed' % self.settings.site_name

    def description(self):
        return 'All entries published and updated on %s' % self.settings.site_name

    def author_name(self):
        return self.settings.author_name

    def copyright(self):
        return self.settings.copyright

    def link(self):
        return 'http://%s' % self.settings.site.domain

    def items(self):
        # Ten newest published proxy records.
        return Proxy.objects.published().order_by('-pub_date')[:10]

    def item_link(self, item):
        return item.content_object.get_absolute_url()

    def item_categories(self, item):
        # Tags are stored as a comma/space separated string; empty or null
        # values yield no categories.
        tags = item.tags
        if not tags:
            return []
        return tags.replace(',', '').split()
class ByTag(AllEntries):
    """Feed of the ten most recent entries carrying a given tag.

    The settings property, title and item_link behavior are inherited
    unchanged from AllEntries (the previous duplicated copies were
    byte-identical and have been removed). Requires ObjectDoesNotExist
    and FeedDoesNotExist to be imported at module level -- previously
    they were raised without ever being imported, which produced a
    NameError instead of the intended exception.
    """

    def get_object(self, bits):
        # URL must supply exactly one component: the tag name.
        if len(bits) != 1:
            raise ObjectDoesNotExist
        return Tag.objects.get(name__exact=bits[0])

    def link(self, obj):
        if not obj:
            raise FeedDoesNotExist
        return reverse('blog_tag_detail', kwargs={'slug': obj.name})

    def description(self, obj):
        return "Posts recently tagged as %s" % obj.name

    def items(self, obj):
        # Ten newest published entries whose tag string contains the tag.
        return Proxy.objects.published().filter(
            tags__icontains=obj.name
        ).order_by('-pub_date')[:10]
| {
"content_hash": "87fcb3798c8c9adbe379838374f9a223",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 82,
"avg_line_length": 27.256410256410255,
"alnum_prop": 0.6222953904045155,
"repo_name": "azizmb/django-mingus",
"id": "c5acf882ca32e31e52c3f9f6975a9d72f0fc0533",
"size": "2126",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mingus/core/feeds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2585652"
},
{
"name": "PHP",
"bytes": "48646"
},
{
"name": "Python",
"bytes": "28903"
}
],
"symlink_target": ""
} |
"""
OneResume - data-driven resume generation
Usage:
one_resume single -t <template-file> -y <yaml-file> -o <output-file> -f <format> [-v|-d]
one_resume batch -c <config-file> [-v|-d]
one_resume -h | --help
one_resume --version
Options:
-h --help Show this screen
-v --verbose Verbose logging
-d --debug Debug logging
-t <template-file> Template file (input)
-y <yaml-file> Resume content (YAML file)
-o <output-file> Output file
-f <format> Format (can be either Word or Text)
-c <config-file> Configuration file (YAML) for batch generation
"""
from __future__ import print_function
from version import __version__
import docopt
import sys, os
import logging
import yaml
from plugin import Plugin
def error(text):
    """Report a fatal error and terminate the program with a failure status."""
    message = "ERROR: %s" % text
    print(message)
    sys.exit(-1)
def yaml_include(loader, node):
    """Handle the ``!include`` YAML tag: load and return the YAML document
    named by the node, resolved relative to the including file's directory.

    :param loader: the active yaml loader (its .name is the including file)
    :param node: scalar node whose value is the relative path to include
    """
    # Get the path out of the yaml file
    file_name = os.path.join(os.path.dirname(loader.name), node.value)
    # Bug fix: use open() in a context manager instead of the Python-2-only
    # file() builtin (also removes a leftover debug print of the path).
    with open(file_name) as inputfile:
        return yaml.load(inputfile)
# Register the "!include" tag so YAML resume files can pull in other files.
yaml.add_constructor("!include", yaml_include)
class OneResume(object):
    """Driver: loads output plugins, reads options, and renders resumes."""

    def __init__ (self):
        # Discover and register every available output plugin.
        Plugin.load()
        self.allowed_filetypes = []
        self.allowed_formats = []
        for p, p_class in Plugin.registered.items():
            print("Registered output plugin type %s" % p)
            self.allowed_filetypes.append(p_class.template_file_extension)
            # Plugin classes are named like "WordResume"; the part before
            # "Resume" is the user-facing format name.
            self.allowed_formats.append(p.split('Resume')[0])

    def getOptions(self, args):
        """Translate parsed docopt args into self.config and set up logging."""
        #print (args)
        self.debug = args['--debug']
        self.verbose = args['--verbose']
        if self.debug: logging.basicConfig(level=logging.DEBUG, format='%(message)s')
        if self.verbose: logging.basicConfig(level=logging.INFO, format='%(message)s')
        if args['single']:
            # Build a one-entry batch-style config from the single-mode
            # options. The indentation inside this YAML template is
            # significant -- do not reformat.
            self.config = yaml.load("""-
                data: %(-y)s
                outputs:
                    -
                        format: %(-f)s
                        template: %(-t)s
                        output: %(-o)s
            """ % (args))
        elif args['batch']:
            config_file = args['-c']
            with open(config_file) as f:
                logging.debug("Reading configuration file %s" % config_file)
                self.config = yaml.load(f)
        else:
            assert False, "docopt command line parsing broken??"

    def run_rendering(self):
        """
        Based on self.config, instantiate each plugin conversion and run it
        """
        if not isinstance(self.config, list):
            # If the config was not a list, just convert this one element into a list
            self.config = [self.config]
        for i, c in enumerate(self.config):
            # For each conversion
            if not 'data' in c:
                # Check that the yaml resume file is specified
                error("Configuration file has not defined 'data' with resume yaml file")
            else:
                with open(c['data']) as resume_file:
                    self.resume = yaml.load(resume_file)
                for output in c['outputs']:
                    fmt = output['format']
                    # Check that we have a plugin whose classname starts with this format
                    assert any([x.startswith(fmt) for x in Plugin.registered])
                    template_file = output['template']
                    filebasename,filetype = os.path.splitext(template_file)
                    if filetype[1:] not in self.allowed_filetypes:
                        error("File type/extension %s is not one of following: %s" % (filetype,' '.join(self.allowed_filetypes)))
                    output_filename = output['output']
                    # Instantiate the required conversion plugin
                    print ("Creating %s ..." % output_filename, end='')
                    text = Plugin.registered['%sResume' % fmt](template_file, self.resume, False)
                    text.render(output_filename)
                    print (" done")

    def go(self, args):
        # Read the command line options, already parsed into a dict by docopt
        self.getOptions(args)
        self.run_rendering()
def main(): #pragma: no cover
    """Command-line entry point: parse argv with docopt and run OneResume."""
    args = docopt.docopt(__doc__, version='OneResume %s' % __version__)
    script = OneResume()
    script.go(args)

if __name__ == '__main__':
    main()
| {
"content_hash": "ee07d24ea015416bfcbc085f749bc697",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 125,
"avg_line_length": 36.63779527559055,
"alnum_prop": 0.5359982806791317,
"repo_name": "virantha/one_resume",
"id": "d1b626fb28e723e1d5356a0279e7b306c0835ea8",
"size": "5282",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "one_resume/one_resume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "606"
},
{
"name": "Python",
"bytes": "27809"
},
{
"name": "Shell",
"bytes": "1857"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import zerver.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares
    # RealmFilter.url_format_string so its validators include
    # zerver.models.filter_format_validator in addition to URLValidator.

    dependencies = [
        ('zerver', '0093_subscription_event_log_backfill'),
    ]

    operations = [
        migrations.AlterField(
            model_name='realmfilter',
            name='url_format_string',
            field=models.TextField(validators=[django.core.validators.URLValidator(), zerver.models.filter_format_validator]),
        ),
    ]
| {
"content_hash": "250891eaacd30c9f9adc52ff3ac1fd8c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 126,
"avg_line_length": 26.65,
"alnum_prop": 0.6679174484052532,
"repo_name": "vabs22/zulip",
"id": "b9af3bc83ff497c6455234b035533ee96d3431e5",
"size": "606",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/migrations/0094_realm_filter_url_validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "404100"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "468187"
},
{
"name": "JavaScript",
"bytes": "2088122"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87465"
},
{
"name": "Python",
"bytes": "3556117"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "46689"
}
],
"symlink_target": ""
} |
"""This module contains the Student Model."""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import countries
import soc.models.role
import soc.models.school
class Student(soc.models.role.Role):
  """Student details for a specific Program.
  """

  # Free-form school name; the help text intentionally asks for the full
  # formal English name.
  school_name = db.StringProperty(required=True,
      verbose_name=ugettext('School Name'))
  school_name.group = ugettext("5. Education")
  # Bug fix: corrected the misspelling "Berekeley" -> "Berkeley" in the
  # user-facing help text.
  school_name.help_text = ugettext(
      'Please enter the full name of your school, college or university in'
      ' this field. Please use the complete formal name of your school, e.g.'
      ' UC Berkeley instead of Cal or UCB. It would be most wonderful if you'
      ' could provide your school\'s name in English, as all the program '
      'administrators speak English as their first language and it will make'
      ' it much easier for us to assemble program statistics, etc., later if'
      ' we can easily read the name of your school.')

  school_country = db.StringProperty(required=True,
      verbose_name=ugettext('School Country/Territory'),
      choices=countries.COUNTRIES_AND_TERRITORIES)
  school_country.group = ugettext("5. Education")

  major = db.StringProperty(required=True,
      verbose_name=ugettext('Major Subject'))
  major.group = ugettext("5. Education")

  # TODO add more degrees because this should be used in GHOP as well
  degree = db.StringProperty(required=True,
      verbose_name=ugettext('Degree'),
      choices=['Undergraduate', 'Master', 'PhD'])
  degree.group = ugettext("5. Education")

  # Four-digit year, entered as an integer.
  expected_graduation = db.IntegerProperty(required=True,
      verbose_name=ugettext('Expected Graduation Year'))
  expected_graduation.help_text = ugettext("Year in integer format only!")
  expected_graduation.example_text = ugettext('Year only, for example "2012"')
  expected_graduation.group = ugettext("5. Education")

  #: Property to gain insight into where students heard about this program
  program_knowledge = db.TextProperty(required=True, verbose_name=ugettext(
      "How did you hear about this program?"))
  program_knowledge.help_text = ugettext("Please be as "
      "specific as possible, e.g. blog post (include URL if possible), mailing "
      "list (please include list address), information session (please include "
      "location and speakers if you can), etc.")
  program_knowledge.group = ugettext("4. Private Info")

  #: A many:1 relationship that ties multiple Students to the
  #: School that they attend.
  school = db.ReferenceProperty(reference_class=soc.models.school.School,
                                required=False, collection_name='students')

  # Opt-in flag for press follow-up after successful project completion.
  can_we_contact_you = db.BooleanProperty(verbose_name=ugettext(
      'Can we contact you?'))
  can_we_contact_you.help_text = ugettext(
      'Please check here if you would not mind being contacted by the Program'
      ' Administrators for follow up with members of the press who would like'
      ' to interview you about the program. You will not be contacted unless '
      ' you successfully complete your project. <br />'
      '<b>Please note that checking this box has no effect on your chances'
      ' of being accepted into the program</b>.')
  can_we_contact_you.group = ugettext("2. Contact Info (Private)")
| {
"content_hash": "f9c733371368a91349e5adaf489cbcee",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 80,
"avg_line_length": 44.26923076923077,
"alnum_prop": 0.7103967564436722,
"repo_name": "jamslevy/gsoc",
"id": "caaeeb32e7927ec446ce0f234c9b4ce15bbefba7",
"size": "4061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/models/student.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "JavaScript",
"bytes": "388268"
},
{
"name": "Perl",
"bytes": "66733"
},
{
"name": "Python",
"bytes": "8290513"
},
{
"name": "Shell",
"bytes": "5570"
}
],
"symlink_target": ""
} |
from builtins import str
import re
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring

# Matches a trailing file extension (e.g. ".jpg"); such URLs cannot take
# photo-service size/ratio path segments. Raw strings are used for the
# regexes (behavior-identical; avoids escape-sequence pitfalls).
UNQUERYABLE_PATTERN = re.compile(r'\.[a-zA-Z]+$')
# Matches an existing "/<size>" or "/<size>/<WxH>" suffix on an image URL.
QUERY_PATTERN = re.compile(r'/\d+(/\d+x\d+)?$')
def get_body(content_dict):
    """
    Get the content item body or caption, whatever it's called
    """
    content_item = find_content_item(content_dict)
    # First truthy candidate wins; fall back to the empty string.
    raw = (content_item.get('body') or
           content_item.get('caption') or
           content_item.get('short_description') or '')
    return br_to_p(strip_runtime_tags(raw))
def get_brief(content_dict, words=60):
    """
    Get the abstract or brief for an item, or make one to length 'words' from
    the body
    """
    abstract = content_dict.get('abstract')
    if abstract:
        return abstract
    content_item = find_content_item(content_dict)
    # First truthy candidate wins; fall back to the empty string.
    raw = (content_item.get('body') or
           content_item.get('caption') or
           content_item.get('short_description') or '')
    return truncate_words(strip_tags(br_to_space(raw)), words)
def get_headline(content_dict):
    """
    Get the headline for this item
    """
    headline = content_dict.get('headline')
    if headline:
        return headline
    # No (truthy) headline on the wrapper: use the content item's title.
    return find_content_item(content_dict)['title']
def get_url(content_dict):
    """
    Get the p2p url for this item, or if it's a link, get the link url
    """
    content_item = find_content_item(content_dict)
    # Link-type items carry their destination in 'url'; everything else
    # exposes its canonical 'web_url'.
    if content_item.get('content_item_type_code') in ('hyperlink', 'storylink'):
        return content_item.get('url', "")
    return content_item['web_url']
def get_thumb_url(content_dict, size, ratio=None):
    """
    Find a thumbnail url in the content item dictionary and adjust the size
    and ratio parameters before returning the url. Pass 'None' for size to get
    a url without any size or ratio params.
    """
    content_item = find_content_item(content_dict)
    # If image_url already contains query, replace it; otherwise, append query.
    # Source preference: photo-services URL, then alternate thumbnail,
    # then plain thumbnail.
    if (
        'photo_services_url' in content_item and
        content_item['photo_services_url']
    ):
        image_url = content_item['photo_services_url']
    elif (
        'alt_thumbnail_url' in content_item and
        content_item['alt_thumbnail_url']
    ):
        image_url = content_item['alt_thumbnail_url']
    elif 'thumbnail_url' in content_item and content_item['thumbnail_url']:
        image_url = content_item['thumbnail_url']
    else:
        # No thumbnail available at all.
        return ""
    # If image_url ends in .jpg or any other filename, can't use query with it
    if UNQUERYABLE_PATTERN.search(image_url):
        return image_url
    # Build the "<size>" or "<size>/<WxH>" path suffix.
    if size is None:
        query = ''
    elif ratio is None:
        query = str(size)
    else:
        query = '/'.join([str(size), ratio])
    if QUERY_PATTERN.search(image_url):
        # Replace an existing size/ratio suffix in place.
        ret = QUERY_PATTERN.sub('/' + query, image_url)
    else:
        ret = '/'.join([image_url.rstrip('/'), query])
    # Drop the trailing '/' left behind when query is empty (size=None).
    return ret.rstrip('/')
def get_byline(content_dict):
    """
    Get the byline for this item
    """
    # Prefer a byline on the wrapper dict; otherwise look at the item itself.
    try:
        return content_dict['byline']
    except KeyError:
        return find_content_item(content_dict)['byline']
def get_time(content_dict):
    """
    Return the best time field available: the item's own display_time,
    else the nested content item's display/create time, else the
    top-level create_time.
    """
    display_time = content_dict.get('display_time')
    if display_time:
        return display_time
    if 'content_item' in content_dict:
        item = content_dict['content_item']
        # Falls through to None when the nested item carries neither field.
        return item.get('display_time') or item.get('create_time') or None
    return content_dict['create_time']
def get_featured_related_item(content_dict):
    """
    Return the first related photo, gallery or video, or None when no
    related item qualifies.
    """
    featured_types = ('embeddedvideo', 'photogallery', 'photo',
                      'premiumvideo')
    related = find_content_item(content_dict)['related_items']
    return next(
        (rel for rel in related
         if rel['content_item_type_code'] in featured_types),
        None,
    )
def get_custom_param_value(content_dict, param_key, default_value='null'):
    """
    Look through a content_dict's custom params and return the value of the
    given key, or default_value when the key is absent or its value falsy.
    The last matching entry wins, mirroring the original scan order.
    """
    matches = [entry for entry in content_dict['programmed_custom_params']
               if entry['key'] == param_key]
    if matches:
        winner = matches[-1]
        value = winner.get('value', winner.get('clob_value'))
        if value:
            return value
    return default_value
def find_content_item(content_dict):
    """Return the nested content item when present, else the dict itself."""
    return content_dict.get('content_item', content_dict)
def br_to_space(text):
    """Collapse each double-<br> sequence in `text` into a single space."""
    double_br = re.compile(r'<br[^>]*?>\s*?( )?\s*?<br[^>]*?>')
    return double_br.sub(' ', text)
def split_paragraphs(value):
    """
    Split a block of text into a list of paragraphs. Only works if
    paragraphs are denoted by <p> tags and not double <br>.
    Use `br_to_p` to convert text with double <br>s to <p> wrapped
    paragraphs.
    """
    normalized = re.sub(r'</p>\s*?<p>', u'</p>\n\n<p>', value)
    return re.split('\n{2,}', normalized)
def br_to_p(value):
    """
    Convert text whose paragraphs are separated by two <br> tags into text
    where each paragraph is wrapped in <p> tags.
    """
    text = re.sub(r'<br\s*?/?>\s*?( )?\s*?<br\s*?/?>', u'\n\n', value)
    wrapped = [u'<p>%s</p>' % chunk.strip()
               for chunk in re.split('\n{2,}', text) if chunk]
    result = u'\n\n'.join(wrapped)
    # Clean up doubled or empty tags produced by the wrapping pass.
    for pattern, replacement in (
            (r'</p\s*?>\s*?</p\s*?>', u'</p>'),
            (r'<p\s*?>\s*?<p\s*?>', u'<p>'),
            (r'<p\s*?>\s*?( )?\s*?</p\s*?>', u''),
    ):
        result = re.sub(pattern, replacement, result)
    return result
def section_heads(value):
    """
    Search through a block of text and replace <p><b>text</b></p>
    paragraphs with <h4>text</h4> headings.
    """
    head_pattern = re.compile(r'<p>\s*?<b>([^<]+?)</b>\s*?</p>')
    return head_pattern.sub(u'<h4>\\1</h4>', value)
def strip_runtime_tags(value):
    """Remove all <runtime:...> opening and closing tags from `value`."""
    runtime_tag = re.compile(r'</?runtime:[^>]*?>')
    return runtime_tag.sub('', value)
def truncate_words(content, words=60, suffix='...'):
    """Limit `content` to at most `words` words, appending `suffix` when cut."""
    tokens = re.split('\s+', force_unicode(content))
    if len(tokens) > words:
        return u' '.join(tokens[:words]) + force_unicode(suffix)
    return content
# http://stackoverflow.com/questions/2584885/strip-tags-python
def strip_tags(value):
    """Strip every HTML tag from `value`, returning the remaining text."""
    tag_pattern = re.compile(r'<[^>]*?>')
    return tag_pattern.sub('', force_unicode(value))
# http://www.codigomanso.com/en/2010/05/una-de-python-force_unicode/
def force_unicode(s, encoding='utf-8', errors='ignore'):
    """
    Return a text (unicode) object representing `s`.

    Byte strings are decoded with the `encoding` codec and the given
    `errors` policy; ``None`` becomes the empty string; any other object
    is converted with ``str()``.

    Fixes a broken half-2to3 conversion: the old body referenced the
    Python 2 `basestring` name (a NameError on Python 3), called
    ``str(str(s), encoding, errors)`` (a TypeError on Python 3), and
    iterated directly over an Exception (only Python 2 exceptions were
    iterable).
    """
    if s is None:
        return ''
    try:
        if isinstance(s, str):
            return s
        if isinstance(s, bytes):
            # Equivalent of the old `unicode(s, encoding, errors)` branch.
            return s.decode(encoding, errors)
        return str(s)
    except UnicodeDecodeError:
        if isinstance(s, Exception):
            # The caller passed an Exception populated with non-decodable
            # byte args; render each arg individually so no further
            # exception escapes.
            return ' '.join(force_unicode(arg, encoding, errors)
                            for arg in s.args)
        raise
| {
"content_hash": "c7ed3b5461fa7fa7470e30c49dfbb804",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 79,
"avg_line_length": 32.13986013986014,
"alnum_prop": 0.5938859878154917,
"repo_name": "datadesk/p2p-python",
"id": "8287973440c36b584bc0b557921b522efd48c75d",
"size": "9192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2p/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "346"
},
{
"name": "Python",
"bytes": "129495"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
import shutil
from setuptools import setup
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
return open(path).read()
# Build manpages if we're making a source distribution tarball.
if 'sdist' in sys.argv:
    # Build the manpage inside the docs directory, always restoring the
    # working directory afterwards.
    base_dir = os.path.dirname(__file__)
    doc_dir = os.path.join(base_dir, 'docs')
    original_dir = os.getcwd()
    os.chdir(doc_dir)
    try:
        subprocess.check_call(['make', 'man'])
    finally:
        os.chdir(original_dir)

    # Replace any stale copy of the generated manpages.
    man_dir = os.path.join(base_dir, 'man')
    if os.path.exists(man_dir):
        shutil.rmtree(man_dir)
    shutil.copytree(os.path.join(doc_dir, '_build', 'man'), man_dir)
# Package metadata and install configuration for beets.
setup(
    name='beets',
    version='1.0b15',
    description='music tagger and library organizer',
    author='Adrian Sampson',
    author_email='adrian@radbox.org',
    url='http://beets.radbox.org/',
    license='MIT',
    platforms='ALL',
    long_description=_read('README.rst'),
    test_suite='test.testall.suite',
    include_package_data=True,  # Install plugin resources.
    packages=[
        'beets',
        'beets.ui',
        'beets.autotag',
        'beets.util',
        'beetsplug',
        'beetsplug.bpd',
        'beetsplug.web',
        'beetsplug.lastgenre',
    ],
    namespace_packages=['beetsplug'],
    entry_points={
        'console_scripts': [
            'beet = beets.ui:main',
        ],
    },
    # colorama is only needed for colored console output on Windows.
    install_requires=[
        'mutagen',
        'munkres',
        'unidecode',
        'musicbrainzngs',
    ] + (['colorama'] if sys.platform == 'win32' else []),
    classifiers=[
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Development Status :: 4 - Beta',
    ],
)
| {
"content_hash": "0e6f67dc9be3f3caaf2906d94751aea2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 66,
"avg_line_length": 27.985915492957748,
"alnum_prop": 0.5636638147961751,
"repo_name": "aspidites/beets",
"id": "df48b94aa5a57d16d3bc2c50a6bbb25c6c054d9d",
"size": "2658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "85314"
},
{
"name": "Python",
"bytes": "672147"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import click
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
import kolibri
from ...utils import dbrestore
from ...utils import default_backup_folder
from ...utils import get_dtm_from_backup_name
from ...utils import search_latest
from kolibri.utils import server
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that restores a Kolibri database from a backup."""

    output_transaction = True

    # `help` intentionally shadows the builtin: Django reads this attribute.
    help = (
        "Restores a database backup of Kolibri. This is not intended for "
        "replication across different devices, but *only* for restoring a "
        "single device from a local backup of the database."
    )

    def add_arguments(self, parser):
        """Register the three mutually exclusive ways to pick a dump file."""
        parser_group = parser.add_mutually_exclusive_group(required=True)
        parser_group.add_argument(
            "dump_file",
            nargs="?",
            type=str,
            help="Specifies the exact dump file to restore from",
        )
        parser_group.add_argument(
            "--latest",
            "-l",
            action="store_true",
            dest="latest",
            help=(
                "Automatically detect and restore from latest backup matching "
                "the major and minor version (X.Y) of current installation."
            ),
        )
        parser_group.add_argument(
            "--select",
            "-s",
            action="store_true",
            dest="select",
            help=(
                "Show the list of the last 10 backups Kolibri has done automatically "
                "for the user to select which one must be restored."
            ),
        )

    def fetch_latest(self, dumps_root):
        """
        Return the latest backup file available in the dumps_root directory.

        Only backups matching the current major.minor Kolibri version are
        considered.

        :raises CommandError: when no suitable backup exists.
        """
        use_backup = None
        # Ultimately, we are okay about a backup from a minor release.
        fallback_version = ".".join(map(str, kolibri.VERSION[:2]))
        if os.path.exists(dumps_root):
            use_backup = search_latest(dumps_root, fallback_version)
        if not use_backup:
            raise CommandError(
                "Could not find a database backup for version: {}".format(
                    fallback_version
                )
            )
        return use_backup

    def select_backup(self, dumps_root):
        """
        Prompt the user to choose among the latest 10 dumps in dumps_root.

        Dumps are sorted by the date encoded in their name, latest first.

        :raises CommandError: when no backups exist at all.
        """
        backups = []
        if os.path.exists(dumps_root):
            backups = os.listdir(dumps_root)
            backups = filter(lambda f: f.endswith(".dump"), backups)
            backups = list(backups)
            backups.sort(key=get_dtm_from_backup_name, reverse=True)
            backups = backups[:10]  # don't show more than 10 backups
        if not backups:
            # Fixed: the message used to end with a stray closing brace.
            raise CommandError("Could not find a database backup")
        # Show a numbered list of options to select from.
        selected_backup = click.prompt(
            "Type the number in brackets to select the backup to be restored\n"
            + "".join(
                (
                    "({num}) {backup}\n".format(
                        num=num + 1, backup=get_dtm_from_backup_name(backup)
                    )
                    for num, backup in enumerate(backups)
                )
            ),
            type=click.Choice([str(i) for i in range(1, len(backups) + 1)]),
        )
        return os.path.join(dumps_root, backups[int(selected_backup) - 1])

    def handle(self, *args, **options):
        """Refuse to run while Kolibri is up, then restore the chosen dump."""
        try:
            server.get_status()
            self.stderr.write(
                self.style.ERROR(
                    "Cannot restore while Kolibri is running, please run:\n"
                    "\n"
                    " kolibri stop\n"
                )
            )
            raise SystemExit()
        except server.NotRunning:
            # Great, it's not running!
            pass
        latest = options["latest"]
        select = options["select"]
        use_backup = options.get("dump_file", None)
        logger.info("Beginning database restore")
        search_root = default_backup_folder()
        if latest:
            use_backup = self.fetch_latest(search_root)
        elif select:
            use_backup = self.select_backup(search_root)
        logger.info("Using backup file: {}".format(use_backup))
        if not os.path.isfile(use_backup):
            raise CommandError("Couldn't find: {}".format(use_backup))
        dbrestore(use_backup)
        self.stdout.write(
            self.style.SUCCESS("Restored database from: {path}".format(path=use_backup))
        )
| {
"content_hash": "3015c2b4d94baef79b7f33e3efadfc75",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 88,
"avg_line_length": 33.11643835616438,
"alnum_prop": 0.5607032057911066,
"repo_name": "mrpau/kolibri",
"id": "b1aa1faa1dca2daa6a0d34cea32e523612579cdd",
"size": "4835",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "kolibri/core/deviceadmin/management/commands/dbrestore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
} |
import pytest
from bst import Bst
def test_constructor():
    """A bare constructor call produces a Bst instance."""
    instance = Bst()
    assert type(instance) is Bst


def test_insert(empty_tree):
    """Inserted values land in the expected positions; duplicates ignored."""
    a = empty_tree
    a.insert(5)
    a.insert(7)
    # Fixed: compare with ==, not `is` — identity with an int literal only
    # happens to pass because CPython caches small integers.
    assert a.top.right.data == 7
    a.insert(8)
    a.insert(6)
    a.insert(9)
    assert a.top.data == 5
    assert a.size() == 5
    a.insert(9)
    # A duplicate value must not grow the tree.
    assert a.size() == 5
def test_depth(populated_tree):
    """The standard six-node fixture tree has depth 4."""
    assert populated_tree.depth() == 4


def test_depth_empty(empty_tree):
    """An empty tree reports no depth at all."""
    assert empty_tree.depth() is None


def test_depth_one():
    """A single-node tree has depth 1."""
    single = Bst()
    single.insert(100)
    assert single.depth() == 1
def test_balance(populated_tree):
    """Balance is negative for a right-heavy tree, positive for left-heavy."""
    assert populated_tree.balance() == -2
    left_heavy = Bst()
    left_heavy.insert(5)
    left_heavy.insert(1)
    assert left_heavy.balance() == 1


def test_containts(populated_tree):
    """contains() reports membership truthfully.

    (Function name keeps its historical misspelling so test selection by
    name stays stable.)
    """
    assert populated_tree.contains(9)
    assert populated_tree.contains(2) is False
def test_in_order(populated_tree):
    """In-order traversal yields values in ascending order."""
    walker = populated_tree.in_order()
    assert [next(walker), next(walker)] == [3, 5]


def test_pre_order(populated_tree):
    """Pre-order traversal visits each root before its children."""
    walker = populated_tree.pre_order()
    assert [next(walker) for _ in range(4)] == [5, 3, 7, 6]


def test_post_order(populated_tree):
    """Post-order traversal visits children before their parent."""
    walker = populated_tree.post_order()
    assert [next(walker) for _ in range(3)] == [3, 6, 9]


def test_breadth_first(populated_tree):
    """Breadth-first traversal yields values level by level."""
    walker = populated_tree.breadth_first()
    assert [next(walker) for _ in range(3)] == [5, 3, 7]
def test_deletion(populated_tree):
    """Deleting a node promotes its successor and drops it from the set."""
    populated_tree.delete(7)
    assert populated_tree.top.right.data == 8
    assert 7 not in populated_tree.set


def test_deletion_bt(big_ass_tree):
    """Deletion also works deeper inside a large tree."""
    big_ass_tree.delete(120)
    assert big_ass_tree.top.right.data == 123
    big_ass_tree.delete(25)
    assert big_ass_tree.top.left.right.data == 33


def test_delete_empty(empty_tree):
    """Deleting from an empty tree is a quiet no-op returning None."""
    assert empty_tree.delete(1234) is None
def test_balance_tree(populated_tree):
    """balance_self() yields a perfectly balanced tree of depth 3."""
    rebalanced = populated_tree.balance_self()
    assert rebalanced.balance() == 0
    assert rebalanced.depth() == 3


def test_balance_bigtree(big_ass_tree):
    """Same check against the large fixture: balance 0 at depth 5."""
    rebalanced = big_ass_tree.balance_self()
    assert rebalanced.balance() == 0
    assert rebalanced.depth() == 5


def test_balance_tree_zero(empty_tree):
    """Rebalancing an empty tree yields None."""
    assert empty_tree.balance_self() is None
##################################
# Testing Fixtures
##################################
@pytest.fixture(scope='function')
def populated_tree():
    """Six-node tree used by most traversal and balance tests."""
    tree = Bst()
    for value in (5, 7, 8, 6, 9, 3):
        tree.insert(value)
    return tree


@pytest.fixture(scope='function')
def empty_tree():
    """Freshly constructed, empty tree."""
    return Bst()


@pytest.fixture(scope='function')
def big_ass_tree():
    """Twenty-insert tree (77 appears twice) for larger-scale cases."""
    tree = Bst()
    values = (100, 14, 25, 77, 98, 120, 33, 145, 66, 111,
              200, 22, 188, 77, 101, 84, 123, 140, 20, 50)
    for value in values:
        tree.insert(value)
    return tree
| {
"content_hash": "760f38ce846fddbb7e5ac110573f8bf5",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 64,
"avg_line_length": 21.97530864197531,
"alnum_prop": 0.625,
"repo_name": "edpark13/data_structure2",
"id": "564a00c52aa7f5381280e2a375dff0ff39156e05",
"size": "3584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_bst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14020"
}
],
"symlink_target": ""
} |
import sys
from restorm.examples.mock.api import LibraryApiClient
def main(argv):
    """
    Run the mock library webservice.

    Start with::

        python -m restorm.examples.mock.library_serv [port or address:port]

    :param argv: command-line arguments after the program name; the single
        optional argument is either ``port`` or ``address:port``.
    """
    ip_address = '127.0.0.1'
    port = 8000

    # This is an example. You should do argument checking.
    if len(argv) == 1:
        ip_address_port = argv[0].split(':', 1)
        if len(ip_address_port) == 1:
            port = ip_address_port[0]
        else:
            ip_address, port = ip_address_port

    # Create a playground HTTP server that handles requests from the
    # ``LibraryApiClient``.
    api = LibraryApiClient('http://%s:%s/api/' % (ip_address, port))
    server = api.create_server(ip_address, int(port))

    # Fixed: parenthesized single-argument print() behaves identically on
    # Python 2 and is required on Python 3; the old statement form was
    # Python-2-only.
    print('Mock library webservice is running at http://%s:%s/api/'
          % (ip_address, port))
    print('Quit the server with CTRL-C.')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('Closing server...')
        server.socket.close()
if __name__ == '__main__':
    # Drop the program name; main() only sees the optional address:port arg.
    main(sys.argv[1:])
| {
"content_hash": "7d4d5e21e17817907a4f880cb84d8c54",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 88,
"avg_line_length": 27.153846153846153,
"alnum_prop": 0.5967894239848914,
"repo_name": "josesanch/restorm",
"id": "262ac80ea4e05635ca7e18fdce672f345d178251",
"size": "1059",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "restorm/examples/mock/library_serv.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from construct import (
Struct,
)
from distance.bytes import Magic, Section
from distance.construct import (
BaseConstructFragment,
Bytes,
DstString, Remainder,
)
from distance.classes import CollectorGroup
from .levelsettings_base import BaseLevelSettings
Classes = CollectorGroup()
@Classes.level_content.fragment
class OldLevelSettings(BaseLevelSettings, BaseConstructFragment):
    """Special settings section only found in very old maps."""

    class_tag = 'OldLevelSettings'
    default_container = Section(Magic[8])

    # Fallbacks for base LevelSettings and other usages: very old maps
    # don't carry these fields, so expose empty/None defaults.
    version = None
    description = None
    author_name = None
    modes = ()
    medal_times = ()
    medal_scores = ()
    background_layer = None
    abilities = ()
    difficulty = None

    # Binary layout of the old settings blob; unknown regions are kept as
    # raw bytes so the data round-trips unchanged.
    _construct_ = Struct(
        'unk_0' / Bytes(4),
        'skybox_name' / DstString,
        'unk_1' / Bytes(143),
        'name' / DstString,
        'unk_2' / Remainder,
    )

    def _print_type(self, p):
        """Print the human-readable type line for this fragment."""
        # Fixed: the string had an extraneous f-prefix with no placeholders.
        p("Type: LevelSettings (old)")
# vim:set sw=4 et:
| {
"content_hash": "4824f735bccddc13b0c9292b492b4cf4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 65,
"avg_line_length": 21.836734693877553,
"alnum_prop": 0.6551401869158878,
"repo_name": "ferreum/distanceutils",
"id": "2ee26707a5afccaf8696705b288dc2ff54350526",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distance/_impl/level_content/oldlevelsettings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "498554"
},
{
"name": "Shell",
"bytes": "1061"
}
],
"symlink_target": ""
} |
"""Console script for ABR."""
import click
@click.command()
def main(args=None):
    """Console script for ABR.

    Placeholder entry point from the project template; the echoed
    messages tell the developer where to put real CLI logic.
    """
    click.echo("Replace this message by putting your code into "
               "ABR.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")


if __name__ == "__main__":
    main()
| {
"content_hash": "d9bbdbb10a87f779f285446cffba59ac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 21.533333333333335,
"alnum_prop": 0.6099071207430341,
"repo_name": "BlackPoint-CX/ABR",
"id": "3e74eb3a412fece9c3852a65e2ec47f41a877958",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ABR/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "246108"
},
{
"name": "HTML",
"bytes": "29484"
},
{
"name": "Makefile",
"bytes": "2271"
},
{
"name": "Python",
"bytes": "82548"
}
],
"symlink_target": ""
} |
import csv
import json
import os
import sys
import re
from tqdm import tqdm
import time
import git
import shutil
import os.path as osp
from utils import *
import requests
from connect import *
from db_op import *
# Vulnerability class to mine, taken from the command line (e.g. "xss").
V_CLASS = sys.argv[1]
# Commit-message classifiers, loosely following the OWASP Top 10 (2013).
#Top 10 OSWAP 2013
# injection (sql,ldap,xpath,xquery,xml,html,os commands).
injec = re.compile('(sql|ldap|xpath|xquery|queries|xml|html|(shell|os|oper.* sys|command|cmd)).*injec|(fix|prevent|found|protect).* injec|injec.* (fix|prev|found|protect)');
# broken authentication and access control
auth = re.compile('(brute.*force|dict|sess.*hijack|broken auth).* (prevent|protect|fix)|(prevent|protect|fix).* (brute.*force|dict|sess.* hijack|broken auth)|(unauthor.*(access|contr))|vuln.* auth|plaintext pass.*|auth.* bypass|sess.* fixation|weak pass.* verif');
# xss
xss = re.compile('fix.* ( xss |cross.*(site|zone) script)|crlf injec|http resp.* split|(reflect|stored|dom).*xss|xss.*(reflect|stored|dom)|xss (vuln|prob|solution)| xss')
# csrf
csrf = re.compile('(cross.*site.*(req|ref).*forgery| csrf |sea.*surf| xsrf |(one.*click|autom).*attack|sess.*riding|conf.*deput)');
# insecure direct object references
# security misconfiguration
# sensitive data exposure
# missing function level access control
# using known vulnerable components
# unvalidated redirects and forwards
# path traversal
pathtrav = re.compile('((path|dir.*) traver.*|(dot-dot-slash|directory traversal|directory climbing|backtracking).*(attack|vuln))');
# denial of service
dos = re.compile('( dos |((distributed)? denial.*of.*service)| ddos |deadlocks)');
# sha-1 collision
sha1 = re.compile('(sha-1|sha 1|sha1) collision');
# misc: generic "security fix" phrasing that fits none of the above classes
misc = re.compile('(fix|found|prevent|protect).*sec.*(bug|vulnerab|problem|defect|warning|issue|weak|attack|flaw|fault|error)|sec.* (bug|vulnerab|problem|defect|warning|issue|weak|attack|flaw|fault|error).* (fix|found|prevent|protect)|vulnerab|attack');
# memory leaks
ml = re.compile('mem.* leak|(fix|inc).* mem.* alloc');
# NOTE(review): the patterns below are defined but never consulted by
# mine_repos(); presumably kept for future classes.
bufover = re.compile('buff.* overflow')
fpd = re.compile('(full)? path discl')
nullp = re.compile('null pointers');
resl = re.compile('res.* leaks');
hl = re.compile('hand.* (leak|alloc)');
encryp = re.compile('encryp.* (bug|vulnerab|problem|defect|warning|issue|weak|attack|flaw|fault|error)')
def add_blobs(diff,vulPath):
for f in diff:
if f.a_blob is not None:
pathA=vulPath + 'Vdiff/added/' + f.a_path;
check_if_dir_exists(pathA)
try:
f.a_blob.stream_data(open(pathA, 'wb'))
except Exception as ex:
print 'Ex:', ex
if f.b_blob is not None:
pathB=vulPath + 'Vdiff/deleted/' + f.b_path;
check_if_dir_exists(pathB)
try:
f.b_blob.stream_data(open(pathB, 'wb'))
except Exception as ex:
print 'Ex:', ex
def save_results(conn, start, datetime, vuls):
    """Record the experiment's stats and remove the working directories."""
    elapsed = time.time() - start
    conn.incr('stats:experiment:n')
    add_experiment(conn, datetime, V_CLASS, elapsed, vuls)
    # Clean up both scratch areas if they exist.
    for folder in ('repos', 'db'):
        if os.path.exists(folder + '/'):
            remove_dir(folder)
def mine_repos(user, repos, br):
    """
    Clone GitHub repo `user`/`repos` (branch `br`) and scan its commit
    history for messages matching the regex of the global V_CLASS.

    For each new matching commit the fixed tree, the vulnerable parent tree
    and their diff are archived and uploaded to cloud storage, and the
    commit is recorded in the database. Returns the number of newly
    recorded commits.
    """
    global conn, g , V_CLASS, bucket;
    id_repo = user+'_'+repos;
    print(id_repo)
    path = 'db/'+id_repo+'/'+V_CLASS+'/'
    try:
        # create output file if not exists
        os.makedirs(os.path.dirname(path))
    except OSError as e:
        print(e)
    print('Downloading...')
    c_url = g.get_user(user).get_repo(repos).clone_url
    repo = git.Repo.clone_from(c_url, 'repos/' + id_repo + '/', branch=br)
    commits = list(repo.iter_commits());
    print('Downloaded...')
    n = 0;
    for c in tqdm(commits):
        message = c.message
        # Pick the classifier regex matching the requested class.
        # NOTE(review): `check` stays unbound (NameError below) when
        # V_CLASS is not one of the classes listed here.
        if V_CLASS == "misc":
            check = misc.search(message)
        elif V_CLASS == "injec":
            check = injec.search(message)
        elif V_CLASS == "csrf":
            check = csrf.search(message)
        elif V_CLASS == "dos":
            check = dos.search(message)
        elif V_CLASS == "auth":
            check = auth.search(message)
        elif V_CLASS == "ml":
            check = ml.search(message)
        elif V_CLASS == "pathtrav":
            check = pathtrav.search(message)
        elif V_CLASS == "xss":
            check = xss.search(message)
        elif V_CLASS == "sha1":
            check = sha1.search(message)
        parents = list(c.parents)
        if check is not None and len(parents) > 0 and commit_exists(conn, user, repos, str(c), str(V_CLASS)) == False:
            n += 1;
            print(c)
            vpath = path + 'vuln'+ str(n) +'/';
            os.makedirs(os.path.dirname(vpath))
            # Check out the fixing commit and archive the fixed tree.
            repo.head.reference = c
            bpath = id_repo + '/'+ V_CLASS +'/'+ 'vuln'+str(n)+'/'
            archive_vuln(vpath + 'Vfix.tar', repo)
            send_blob(bpath + 'Vfix.tar', vpath + 'Vfix.tar', bucket)
            # For merge commits the second parent is taken as the vulnerable
            # state; fall back to the first parent when the diff is empty.
            if len(parents) == 1:
                vulParent = parents[0]
            elif len(parents) > 1:
                vulParent = parents[1]
            diff = c.diff(vulParent, create_patch=True)
            if len(diff) == 0:
                vulParent = parents[0]
                diff = c.diff(vulParent, create_patch=True)
            # Check out and archive the vulnerable parent tree.
            repo.head.reference = vulParent;
            archive_vuln(vpath + 'Vvul.tar', repo)
            send_blob(bpath + 'Vvul.tar', vpath + 'Vvul.tar', bucket)
            commit_url = g.get_user(user).get_repo(repos).get_commit(str(c)).html_url;
            if commit_exists(conn, user, repos, str(c), V_CLASS) == False:
                add_commit(conn, n, user, repos, V_CLASS, str(c), vulParent, '', '', '', '', commit_url)
                conn.incr('stats:commit:n')
                conn.incr('stats:commit:%s:%s'%(user,repos))
                conn.incr('stats:commit:%s'%V_CLASS)
            # Dump per-file before/after blobs, tar them, upload, clean up.
            add_blobs(diff,vpath)
            make_tarfile(vpath + 'Vdiff.tar', vpath + 'Vdiff')
            send_blob(bpath + 'Vdiff.tar', vpath + 'Vdiff.tar', bucket)
            shutil.rmtree(vpath + 'Vdiff')
    shutil.rmtree(path)
    return n;
# check arguments
if (len(sys.argv) != 2):
    print("Usage: python repos_miner.py <class>")
    sys.exit(0)
# External service handles: GitHub API, Redis and Google Cloud Storage.
g = connect_to_github('config.json');
conn = connect_to_db('redis.json');
sgc = connect_to_gcloud_storage();
bucket = get_bucket(sgc, 'secbench1');
# get normal repositories
repos = get_repos_n(conn)
# Experiment timestamp identifier.
# NOTE(review): this name shadows the stdlib `datetime` module name for
# the remainder of the script.
datetime = time.strftime("%x") + ':' + time.strftime("%X");
# start measuring time
start = time.time()
# number of caught vulnerabilities
vuls = 0;
# mine each repository; a KeyboardInterrupt still persists partial results.
try:
    for r in repos[0]:
        repo_info = get_repos_info(conn,r)[0]
        owner = repo_info['owner']
        name = repo_info['name']
        branch = repo_info['branch']
        if class_mined(conn, owner, name, V_CLASS) == False:
            print('I\'m mining '+ owner +'/'+ name)
            vuls += mine_repos(owner, name, branch)
            set_class_mined(conn, owner, name, V_CLASS)
            add_repos_to_exp(conn, datetime, V_CLASS, owner, name)
        else:
            print(owner +'/'+ name+ ' already mined for '+ V_CLASS +' class')
        # Drop the clone as soon as a repository is finished.
        if os.path.exists('repos/'+ owner + '_' + name + '/'):
            remove_dir('repos/'+ owner + '_' + name)
    print('Process finished! Check results folder.')
    save_results(conn, start, datetime, vuls)
except KeyboardInterrupt:
    print('You have interrupted the process! Please wait, we are saving all the information.')
    save_results(conn, start, datetime, vuls)
| {
"content_hash": "b5f15293d5cb6b1a917d865d833b0922",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 264,
"avg_line_length": 34.806451612903224,
"alnum_prop": 0.5975109228121276,
"repo_name": "TQRG/secbench",
"id": "f88bc08f2b39208066e3284216272d12072faa09",
"size": "7553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool/repos_miner.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from llvmlite import ir
from llvmlite.ir.transforms import Visitor, CallVisitor
class FastFloatBinOpVisitor(Visitor):
    """
    A pass that adds the configured fastmath flags to every float binary-op
    instruction that carries no flags yet.
    """

    float_binops = frozenset(['fadd', 'fsub', 'fmul', 'fdiv', 'frem', 'fcmp'])

    def __init__(self, flags):
        self.flags = flags

    def visit_Instruction(self, instr):
        # Only touch flag-less float binops; existing flags take precedence.
        if instr.opname in self.float_binops and not instr.flags:
            for flag in self.flags:
                instr.flags.append(flag)
class FastFloatCallVisitor(CallVisitor):
    """
    A pass that marks every call returning float/double as a fastmath call.
    """

    def __init__(self, flags):
        self.flags = flags

    def visit_Call(self, instr):
        # Only calls whose return type is float or double get the flags.
        float_types = (ir.FloatType(), ir.DoubleType())
        if instr.type in float_types:
            for flag in self.flags:
                instr.fastmath.add(flag)
def rewrite_module(mod, options):
    """
    Rewrite the given LLVM module to use fastmath everywhere, applying
    both the binop and the call visitor with the flags from `options`.
    """
    flags = options.flags
    for visitor_cls in (FastFloatBinOpVisitor, FastFloatCallVisitor):
        visitor_cls(flags).visit(mod)
| {
"content_hash": "3014905650ac2df2a35d6fe005af690d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 27.522727272727273,
"alnum_prop": 0.6267547481420314,
"repo_name": "sklam/numba",
"id": "d6dd1b89c20cb78cd9b819663aa21ce270ce4bbe",
"size": "1211",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "numba/core/fastmathpass.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
} |
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
 limitations under the License.
-------------------------------------------------------------------------'''
'''
SQLAlchemy models for Dragon data.
'''
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import types
from json import dumps
from json import loads
from dragon.openstack.common import exception
from dragon.openstack.common import uuidutils
from dragon.openstack.common import timeutils
from dragon.db.sqlalchemy.session import get_session
from sqlalchemy.orm.session import Session
# from sqlalchemy import UniqueConstraint
BASE = declarative_base()
class Json(types.TypeDecorator):
    """Column type that stores arbitrary values as JSON-encoded text."""

    impl = types.Text

    def load_dialect_impl(self, dialect):
        # On MySQL use LONGTEXT so large JSON payloads fit; other dialects
        # keep the generic Text implementation.
        if dialect.name == 'mysql':
            return dialect.type_descriptor(mysql.LONGTEXT())
        return self.impl

    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database.
        return dumps(value)

    def process_result_value(self, value, dialect):
        # Deserialize on the way out of the database.
        return loads(value)
# TODO(leizhang) When we removed sqlalchemy 0.7 dependence
# we can import MutableDict directly and remove ./mutable.py
try:
    # Newer SQLAlchemy ships MutableDict; associating it with Json makes
    # in-place changes to stored dicts dirty the owning row.
    from sqlalchemy.ext.mutable import MutableDict as sa_MutableDict
    sa_MutableDict.associate_with(Json)
except ImportError:
    # SQLAlchemy 0.7 fallback: use the vendored implementation.
    from dragon.db.sqlalchemy.mutable import MutableDict
    MutableDict.associate_with(Json)
class DragonBase(object):
    """Common dict-like and persistence helpers shared by Dragon models.

    (Docstring said "Heat Models"; the code was adapted from Heat.)
    """
    __table_args__ = {'mysql_engine': 'InnoDB'}
    __table_initialized__ = False
    # Audit timestamps maintained automatically on insert/update.
    created_at = sqlalchemy.Column(sqlalchemy.DateTime,
                                   default=timeutils.utcnow)
    updated_at = sqlalchemy.Column(sqlalchemy.DateTime,
                                   onupdate=timeutils.utcnow)

    def save(self, session=None):
        """Save this object."""
        # Prefer an explicit session, then the session this instance is
        # already attached to, then a fresh one.
        if not session:
            session = Session.object_session(self)
        if not session:
            session = get_session()
        session.add(self)
        try:
            session.flush()
        except IntegrityError as e:
            # Translate uniqueness violations into the domain exception;
            # everything else propagates unchanged.
            if str(e).endswith('is not unique'):
                raise exception.Duplicate(str(e))
            else:
                raise

    def expire(self, session=None, attrs=None):
        """Expire this object ()."""
        if not session:
            session = Session.object_session(self)
        if not session:
            session = get_session()
        session.expire(self, attrs)

    def refresh(self, session=None, attrs=None):
        """Refresh this object."""
        if not session:
            session = Session.object_session(self)
        if not session:
            session = get_session()
        session.refresh(self, attrs)

    def delete(self, session=None):
        """Delete this object."""
        if not session:
            session = Session.object_session(self)
        if not session:
            session = get_session()
        session.delete(self)
        session.flush()

    def __setitem__(self, key, value):
        # Dict-style attribute assignment.
        setattr(self, key, value)

    def __getitem__(self, key):
        # Dict-style attribute access.
        return getattr(self, key)

    def get(self, key, default=None):
        # Dict-style .get() with a default.
        return getattr(self, key, default)

    def __iter__(self):
        # Iterate (column name, value) pairs; see next() below.
        self._i = iter(object_mapper(self).columns)
        return self

    def next(self):
        # NOTE(review): Python 2 iterator protocol; Python 3 would need
        # __next__ and next(self._i) instead.
        n = self._i.next().name
        return n, getattr(self, n)

    def update(self, values):
        """Make the model object behave like a dict."""
        for k, v in values.iteritems():
            setattr(self, k, v)

    def update_and_save(self, values, session=None):
        # Apply the given values inside a single explicit transaction.
        if not session:
            session = Session.object_session(self)
        if not session:
            session = get_session()
        session.begin()
        for k, v in values.iteritems():
            setattr(self, k, v)
        session.commit()

    def iteritems(self):
        """Make the model object behave like a dict.
        Includes attributes from joins.
        """
        local = dict(self)
        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
                       if not k[0] == '_'])
        local.update(joined)
        return local.iteritems()
class SoftDelete(object):
    """Mixin adding soft (tombstone) deletion via a deleted_at timestamp."""
    # NULL means "live"; a timestamp marks the row as deleted.
    deleted_at = sqlalchemy.Column(sqlalchemy.DateTime)

    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.update_and_save({'deleted_at': timeutils.utcnow()},
                             session=session)
""" start dragon """
class Resource_type(BASE, DragonBase):
    """Lookup table of resource kinds, optionally with a default action."""
    __tablename__ = 'resource_type'
    id = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                           nullable=False, autoincrement=True)
    name = sqlalchemy.Column('name', sqlalchemy.String, nullable=False)
    # Optional FK to the action applied by default for this type.
    default_action_id = sqlalchemy.Column(sqlalchemy.Integer,
                                          sqlalchemy.ForeignKey('actions.id'),
                                          nullable=True)
class Resources(BASE, DragonBase):
    """A tenant-owned resource, classified through resource_type."""
    __tablename__ = 'resources'
    id = sqlalchemy.Column('id', sqlalchemy.String,
                           primary_key=True,
                           default=uuidutils.generate_uuid)
    name = sqlalchemy.Column('name', sqlalchemy.String, nullable=False)
    tenant_id = sqlalchemy.Column('tenant_id', sqlalchemy.String,
                                  nullable=False)
    resource_type_id =\
        sqlalchemy.Column(sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('resource_type.id'),
                          nullable=False)
    # Eagerly joined type row; backref exposes Resource_type.resources.
    resource_type = relationship(Resource_type, lazy='joined',
                                 backref=backref('resources'),
                                 foreign_keys=resource_type_id,
                                 primaryjoin='Resources.resource_type_id=='
                                             'Resource_type.id')
class Actions(BASE, DragonBase):
    """An executable action; class_name presumably names the implementing
    class (confirm against the action-loading code)."""
    __tablename__ = 'actions'
    id = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                           nullable=False, autoincrement=True)
    class_name = sqlalchemy.Column('class_name', sqlalchemy.String,
                                   nullable=False)
    name = sqlalchemy.Column('name', sqlalchemy.String)
    resource_type_id =\
        sqlalchemy.Column(sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('resource_type.id'))
    resource_type = relationship(Resource_type, backref=backref('actions'),
                                 foreign_keys=resource_type_id,
                                 primaryjoin='Actions.resource_type_id=='
                                             'Resource_type.id')
class Workload_policies(BASE, DragonBase, SoftDelete):
    """A tenant's workload policy; soft-deleted rather than removed."""
    __tablename__ = 'workload_policies'
    id = sqlalchemy.Column('id', sqlalchemy.String,
                           primary_key=True,
                           default=uuidutils.generate_uuid)
    name = sqlalchemy.Column('name', sqlalchemy.String, nullable=False)
    tenant_id = sqlalchemy.Column('tenant_id',
                                  sqlalchemy.String, nullable=False)
class Workload_policy_execution(BASE, DragonBase):
    """One execution (run) of a workload policy, with its status."""
    __tablename__ = 'workload_policy_execution'
    id = sqlalchemy.Column('id', sqlalchemy.String,
                           primary_key=True,
                           default=uuidutils.generate_uuid)
    status = sqlalchemy.Column('status', sqlalchemy.String, nullable=False)
    workload_policy_id =\
        sqlalchemy.Column(sqlalchemy.String,
                          sqlalchemy.ForeignKey('workload_policies.id'),
                          nullable=False)
    # NOTE(review): cascade/passive_deletes on the many-to-one side looks
    # intended to drop executions with their policy row - confirm intent.
    workload_policy =\
        relationship(Workload_policies,
                     backref=backref('workload_policy_execution'),
                     foreign_keys=workload_policy_id,
                     cascade="delete",
                     passive_deletes=True,
                     primaryjoin='Workload_policy_execution.'
                                 'workload_policy_id=='
                                 'Workload_policies.id')
class Action_execution(BASE, DragonBase):
    """Record of one action executed on one resource within a policy run."""
    __tablename__ = 'action_execution'
    id = sqlalchemy.Column('id', sqlalchemy.String,
                           primary_key=True,
                           default=uuidutils.generate_uuid)
    status = sqlalchemy.Column('status', sqlalchemy.String,
                               nullable=False)
    action_id = sqlalchemy.Column(sqlalchemy.Integer,
                                  sqlalchemy.ForeignKey('actions.id'),
                                  nullable=False)
    resource_id = sqlalchemy.Column(sqlalchemy.String,
                                    sqlalchemy.ForeignKey('resources.id'),
                                    nullable=False)
    workload_policy_execution_id = sqlalchemy.Column(
        sqlalchemy.String,
        sqlalchemy.ForeignKey('workload_policy_execution.id'),
        nullable=False)
    action =\
        relationship(Actions,
                     backref=backref('action_execution'),
                     foreign_keys=action_id,
                     primaryjoin='Action_execution.action_id==Actions.id')
    resource =\
        relationship(Resources,
                     backref=backref('action_execution'),
                     foreign_keys=resource_id,
                     primaryjoin='Action_execution.resource_id=='
                                 'Resources.id')
    # Deleted together with its parent policy execution (see cascade).
    workload_policy_exec =\
        relationship(Workload_policy_execution,
                     backref=backref('action_execution'),
                     foreign_keys=workload_policy_execution_id,
                     cascade="delete",
                     passive_deletes=True,
                     primaryjoin='Action_execution.'
                                 'workload_policy_execution_id=='
                                 'Workload_policy_execution.id')
class Action_resource(BASE, DragonBase):
    """Association of an action with a resource under a workload policy."""
    __tablename__ = 'action_resource'
    id = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                           nullable=False, autoincrement=True)
    workload_policy_id = sqlalchemy.Column(
        sqlalchemy.String,
        sqlalchemy.ForeignKey('workload_policies.id'),
        nullable=False)
    resource_id =\
        sqlalchemy.Column(sqlalchemy.String,
                          sqlalchemy.ForeignKey('resources.id'),
                          nullable=False)
    action_id = sqlalchemy.Column(sqlalchemy.Integer,
                                  sqlalchemy.ForeignKey('actions.id'),
                                  nullable=False)
    resource =\
        relationship(Resources,
                     backref=backref('action_resource'),
                     foreign_keys=resource_id,
                     primaryjoin='Action_resource.resource_id=='
                                 'Resources.id')
    action =\
        relationship(Actions,
                     backref=backref('action_resource'),
                     foreign_keys=action_id,
                     primaryjoin='Action_resource.action_id=='
                                 'Actions.id')
    # Rows are deleted together with their parent policy (see cascade).
    workload_policy =\
        relationship(Workload_policies,
                     backref=backref('action_resource'),
                     foreign_keys=workload_policy_id,
                     cascade="delete",
                     passive_deletes=True,
                     primaryjoin='Action_resource.'
                                 'workload_policy_id== Workload_policies.id')
| {
"content_hash": "6f0cb1b0d08619ae5cb26d69af9c8a0d",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 36.6189111747851,
"alnum_prop": 0.5442879499217528,
"repo_name": "os-cloud-storage/openstack-workload-disaster-recovery",
"id": "687896b07a68ce115b0813366a2d50496684406b",
"size": "12824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragon/db/sqlalchemy/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4930"
},
{
"name": "Python",
"bytes": "758400"
},
{
"name": "Shell",
"bytes": "24692"
}
],
"symlink_target": ""
} |
"""
Created on 2015-05-19
@author: Danny<manyunkai@hotmail.com>
DannyWork Project
"""
from __future__ import unicode_literals
from django import forms
from .models import ImageItem
class ImageItemForm(forms.ModelForm):
    """
    ModelForm for ImageItem (image object) records.
    """
    class Meta:
        model = ImageItem
        fields = ['title', 'author', 'description', 'image']
        widgets = {
            # Wide, short textarea for the free-form description field.
            'description': forms.Textarea({'cols': '100', 'rows': '5'})
        }
| {
"content_hash": "1eeb11839efa936b7f8b7e573708d340",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 18.44,
"alnum_prop": 0.6052060737527115,
"repo_name": "manyunkai/dannysite4",
"id": "896b79fc54d70549c608e5d2cad27b3563261a57",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/box/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "203273"
},
{
"name": "HTML",
"bytes": "349894"
},
{
"name": "JavaScript",
"bytes": "239150"
},
{
"name": "PHP",
"bytes": "4398"
},
{
"name": "Python",
"bytes": "129595"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Minimal packaging metadata for the rimage library.
setup(
    name="rimage",
    version="0.1",
    description="Generic framework to associate a profile image with mongo documents.",
    author="Manas Garg",
    author_email="manasgarg@gmail.com",
    license="BSD License",
    url="https://github.com/manasgarg/rimage",
    packages=["rimage"],
    long_description="",
)
| {
"content_hash": "93049b96f5110f9fd8f874d41cd719d2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 28.46153846153846,
"alnum_prop": 0.654054054054054,
"repo_name": "manasgarg/rimage",
"id": "fc44ef141d18459181ff189110a03f3b9ecc7c5a",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7792"
}
],
"symlink_target": ""
} |
import os
import shutil
import build_utils
def get_supported_targets(platform):
    """Return the build targets available on the given host platform."""
    targets_by_platform = {
        'win32': ['win32'],
        'darwin': ['macos'],
        'linux': ['linux'],
    }
    return targets_by_platform.get(platform, [])
def get_dependencies_for_target(target):
    # googletest has no dependencies on other third-party targets.
    return []
def build_for_target(target, working_directory_path, root_project_path):
    """Dispatch the build to the platform-specific implementation.

    Unknown targets are silently ignored, matching the target lists
    reported by get_supported_targets().
    """
    builders = {
        'win32': _build_win32,
        'macos': _build_macos,
        'linux': _build_linux,
    }
    builder = builders.get(target)
    if builder is not None:
        builder(working_directory_path, root_project_path)
def get_download_info():
    # Upstream repository cloned by _download().
    return 'https://github.com/google/googletest.git'
def _copyLib(src, dst):
if not os.path.isdir(dst):
os.makedirs(dst)
shutil.copy2(src, dst)
def _download(working_directory_path):
    """Clone googletest and check out the release-1.8.0 tag.

    Returns the path of the cloned source folder.
    """
    source_folder_path = os.path.join(working_directory_path, 'googletest')
    build_utils.run_process(
        ['git', 'clone', get_download_info()],
        process_cwd=working_directory_path,
        shell=True)
    build_utils.run_process(
        ['git', 'checkout', 'tags/release-1.8.0'],
        process_cwd=source_folder_path,
        shell=True)
    return source_folder_path
@build_utils.run_once
def _patch_sources(working_directory_path):
    # Runs only once per session: applies local fixes from patch.diff
    # on top of the freshly checked-out sources.
    build_utils.apply_patch(
        os.path.abspath('patch.diff'), working_directory_path)
def _build_win32(working_directory_path, root_project_path):
    """Build gmock for Windows via CMake/MSBuild and install the artifacts."""
    source_folder_path = _download(working_directory_path)
    _patch_sources(source_folder_path)
    # Inject our property overrides into every project of the solution.
    override_props_file=os.path.abspath('override.props')
    msbuild_args=[
        "/p:ForceImportBeforeCppTargets={}".format(override_props_file),
    ]
    build_utils.build_and_copy_libraries_win32_cmake(
        os.path.join(source_folder_path, '_build'),
        source_folder_path,
        root_project_path,
        'googletest-distribution.sln', 'gmock',
        'gmock.lib', 'gmock.lib',
        'gmock.lib', 'gmock.lib',
        'gmock.lib', 'gmock.lib',
        msbuild_args=msbuild_args,
        target_lib_subdir='googlemock',
        static_runtime=False)
    _copy_headers(source_folder_path, root_project_path)
def _build_macos(working_directory_path, root_project_path):
    """Build gmock for macOS via CMake/Xcode and install the artifacts."""
    source_folder_path = _download(working_directory_path)
    _patch_sources(source_folder_path)
    build_utils.build_and_copy_libraries_macos_cmake(
        os.path.join(source_folder_path, '_build'),
        source_folder_path,
        root_project_path,
        'googletest-distribution.xcodeproj', 'gmock',
        'libgmock.a', 'libgmock.a',
        target_lib_subdir='googlemock')
    _copy_headers(source_folder_path, root_project_path)
def _build_linux(working_directory_path, root_project_path):
    """Build gmock for Linux via CMake/make and install the artifacts."""
    source_folder_path = _download(working_directory_path)
    _patch_sources(source_folder_path)
    build_utils.build_and_copy_libraries_linux_cmake(
        os.path.join(source_folder_path, '_build'),
        source_folder_path,
        root_project_path,
        target='all',
        lib_name='libgmock.a',
        target_lib_subdir='googlemock')
    _copy_headers(source_folder_path, root_project_path)
def _copy_headers(source_folder_path, root_project_path):
    """Copy gmock/gtest public headers and gmock scripts into the project."""
    mappings = [
        ('googlemock/include/gmock', 'Libs/include/googlemock/gmock'),
        ('googletest/include/gtest', 'Libs/include/googlemock/gtest'),
        ('googlemock/scripts', 'Thirdparty/googlemock/scripts'),
    ]
    for src_rel, dst_rel in mappings:
        build_utils.clean_copy_includes(
            os.path.join(source_folder_path, src_rel),
            os.path.join(root_project_path, dst_rel))
| {
"content_hash": "f22ed074771373d833ea2eeaf4d75e98",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 75,
"avg_line_length": 31.146153846153847,
"alnum_prop": 0.6599160286490492,
"repo_name": "dava/dava.engine",
"id": "22dc801a16935ff397630634a7727c81fba20f16",
"size": "4049",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "Thirdparty/googlemock/build.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "166572"
},
{
"name": "Batchfile",
"bytes": "18562"
},
{
"name": "C",
"bytes": "61621347"
},
{
"name": "C#",
"bytes": "574524"
},
{
"name": "C++",
"bytes": "50229645"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "11439187"
},
{
"name": "CSS",
"bytes": "32773"
},
{
"name": "Cuda",
"bytes": "37073"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "44259"
},
{
"name": "Fortran",
"bytes": "8835"
},
{
"name": "GLSL",
"bytes": "3726"
},
{
"name": "Go",
"bytes": "1235"
},
{
"name": "HTML",
"bytes": "8621333"
},
{
"name": "Java",
"bytes": "232072"
},
{
"name": "JavaScript",
"bytes": "2560"
},
{
"name": "Lua",
"bytes": "43080"
},
{
"name": "M4",
"bytes": "165145"
},
{
"name": "Makefile",
"bytes": "1349214"
},
{
"name": "Mathematica",
"bytes": "4633"
},
{
"name": "Module Management System",
"bytes": "15224"
},
{
"name": "Objective-C",
"bytes": "1909821"
},
{
"name": "Objective-C++",
"bytes": "498191"
},
{
"name": "Pascal",
"bytes": "99390"
},
{
"name": "Perl",
"bytes": "396608"
},
{
"name": "Python",
"bytes": "782784"
},
{
"name": "QML",
"bytes": "43105"
},
{
"name": "QMake",
"bytes": "156"
},
{
"name": "Roff",
"bytes": "71083"
},
{
"name": "Ruby",
"bytes": "22742"
},
{
"name": "SAS",
"bytes": "16030"
},
{
"name": "Shell",
"bytes": "2482394"
},
{
"name": "Slash",
"bytes": "117430"
},
{
"name": "Smalltalk",
"bytes": "5908"
},
{
"name": "TeX",
"bytes": "428489"
},
{
"name": "Vim script",
"bytes": "133255"
},
{
"name": "Visual Basic",
"bytes": "54056"
},
{
"name": "WebAssembly",
"bytes": "13987"
}
],
"symlink_target": ""
} |
import csv
import os
import glob
import argparse
import numpy as np
def merge_csv_files(src_dir, dst_dir):
    """Merge every *_md.csv in src_dir into one GR_<date>_metadata.csv.

    Files are processed in sorted (frame-number) order; the flight date
    is recovered from the first file's YYYY_MM_DD_* name.
    """
    md_files = sorted(glob.glob(os.path.join(src_dir, '*_md.csv')))
    # Extract the flight name (i.e. date) from the first csv filename.
    first_name = os.path.basename(md_files[0])
    flight_id = first_name.rsplit('_', 2)[0].split('/')[-1]
    yyyy, mm, dd = flight_id.split('_')
    # One summary row per frame, in frame order.
    aggregate_data = [read_md_file(md_file) for md_file in md_files]
    dst_file = os.path.join(dst_dir,
                            'GR_{}{}{}_metadata.csv'.format(yyyy, mm, dd))
    write_merged_md(dst_file, aggregate_data)
def read_md_file(md_file):
    """Read a single *_md.csv metadata file.

    Returns [frame, qa, snow, gray, pond, ocean, shadow]; the frame number
    is parsed from the fourth underscore-separated filename token and the
    fractions come from the file's (last) data row.
    """
    qa = 0
    snow, gray, pond, ocean, shadow = 0, 0, 0, 0, 0
    # Filename pattern: <flight>_<frame>_md.csv, frame at index 3.
    frame = int(os.path.basename(md_file).split('_')[3])
    with open(md_file, 'r') as md:
        reader = csv.reader(md)
        try:
            next(reader)  # skip the header row
            for row in reader:
                qa = float(row[0])
                snow = float(row[1])
                gray = float(row[2])
                pond = float(row[3])
                ocean = float(row[4])
                # Older files lack the shadow column; default it to 0.
                shadow = float(row[5]) if len(row) > 5 else 0
        except Exception as e:  # tolerate empty or malformed files
            print('Caught exception: ' + str(e))
            print('File: ' + md_file)
    return [frame, qa, snow, gray, pond, ocean, shadow]
def write_merged_md(dst_file, aggregate_data):
    """Write the aggregated per-frame rows to dst_file under a header row."""
    print("Writing to {}...".format(dst_file))
    header = ["Frame", "Quality Score", "White Ice",
              "Gray Ice", "Melt Ponds", "Open Water", "Shadow"]
    with open(dst_file, 'w') as md:
        writer = csv.writer(md)
        writer.writerow(header)
        writer.writerows(aggregate_data)
def main():
    """CLI entry point: merge per-frame metadata CSVs from a directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument("src_dir",
                        help="Source directory containing *_md.csv files.")
    parser.add_argument("--dst_dir", default=None,
                        help="folder to place the merged data file")
    args = parser.parse_args()
    #src_dir = os.path.dirname(args.src_dir)
    src_dir = args.src_dir
    dst_dir = args.dst_dir
    if dst_dir is None:
        # Default the output one level above the source directory.
        dst_dir = os.path.split(src_dir)[0]
    #else:
    #    dst_dir = os.path.dirname(dst_dir)
    merge_csv_files(src_dir, dst_dir)
if __name__ == '__main__':
    main()
| {
"content_hash": "f5b06596a67e6ddc7e8cfd58f22e780f",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 28.29245283018868,
"alnum_prop": 0.5658552850950317,
"repo_name": "NeoGeographyToolkit/Tools",
"id": "fb7b42e8538824d0f54817b51c461bc8ac30b4df",
"size": "2999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nsidc_upload/labels/merge_md.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "169703"
},
{
"name": "CMake",
"bytes": "15434"
},
{
"name": "Perl",
"bytes": "37337"
},
{
"name": "Python",
"bytes": "273477"
},
{
"name": "Shell",
"bytes": "58824"
}
],
"symlink_target": ""
} |
from core import geo, pmap
import numpy as np
from datetime import timedelta
import glob
import os
from netcdf import netcdf as nc
from cache import Cache, Loader
from helpers import short
import logging
class Heliosat2(object):
    """Heliosat-2 model driver: estimates global solar radiation from
    satellite images via a pluggable computation strategy."""
    def __init__(self, config, strategy_type):
        self.config = config
        self.filenames = config['data']
        self.SAT_LON = -75.113
        # -75.3305 # longitude of sub-satellite point in degrees
        self.IMAGE_PER_HOUR = 2
        self.GOES_OBSERVED_ALBEDO_CALIBRATION = 1.89544 * (10 ** (-3))
        # pi over the observed albedo calibration constant.
        self.i0met = np.pi / self.GOES_OBSERVED_ALBEDO_CALIBRATION
        self.strategy_type = strategy_type
        self.cache = TemporalCache(self)
    def create_1px_dimensions(self, root):
        # Dimensions for scalar (single-pixel, single-time) variables.
        nc.getdim(root, 'xc_k', 1)
        nc.getdim(root, 'yc_k', 1)
        nc.getdim(root, 'time', 1)
    def create_slots(self, loader, cache, strategy):
        """Create and fill the per-image time-slot variable in the cache."""
        self.create_1px_dimensions(cache)
        time = loader.time
        shape = list(time.shape)
        shape.append(1)
        strategy.times = time.reshape(tuple(shape))
        strategy.slots = cache.getvar('slots', 'i1', ('time', 'yc_k', 'xc_k'))
        strategy.slots[:] = strategy.calculate_slots(self.IMAGE_PER_HOUR)
        nc.sync(cache)
    def create_variables(self, loader, cache, strategy):
        # Slots must exist before the temporal variables that use them.
        self.create_slots(loader, cache, strategy)
        self.create_temporal(loader, cache, strategy)
    def create_temporal(self, loader, cache, strategy):
        """Allocate the temporal netCDF variables consumed by the strategy."""
        create_f = lambda name, source: cache.getvar(name, 'f4', source=source)
        create = lambda name, source: cache.getvar(name, source=source)
        strategy.declination = create_f('declination', strategy.slots)
        strategy.solarangle = create_f('solarangle', loader.ref_data)
        nc.sync(cache)
        strategy.solarelevation = create('solarelevation', strategy.solarangle)
        strategy.excentricity = create_f('excentricity', strategy.slots)
        strategy.gc = create('gc', strategy.solarangle)
        strategy.atmosphericalbedo = create('atmosphericalbedo',
                                            strategy.solarangle)
        strategy.t_sat = create('t_sat', loader.ref_lon)
        strategy.t_earth = create('t_earth', strategy.solarangle)
        strategy.cloudalbedo = create('cloudalbedo', strategy.solarangle)
        nc.sync(cache)
    def update_temporalcache(self, loader, cache):
        """Instantiate the strategy and let it refresh the temporal cache."""
        logging.info("Updating temporal cache... ")
        self.strategy = self.strategy_type(self, loader, cache)
        self.strategy.update_temporalcache(loader, cache)
    def estimate_globalradiation(self, loader, cache):
        # Nothing to do unless update_temporalcache() installed a strategy.
        if hasattr(self, 'strategy'):
            logging.info("Obtaining the global radiation... ")
            output = OutputCache(self)
            self.strategy.estimate_globalradiation(loader, cache, output)
            output.dump()
        cache.dump()
    def run_with(self, loader):
        """Run the full estimation pipeline over the loader's images."""
        self.estimate_globalradiation(loader, self.cache)
class AlgorithmCache(Cache):
    """Base cache bound to an algorithm's configuration and input files."""
    def __init__(self, algorithm):
        super(AlgorithmCache, self).__init__()
        self.algorithm = algorithm
        self.tile_config = self.algorithm.config['tile_cut']
        self.filenames = self.algorithm.filenames
        # Subclasses implement initialize_path() to set up their storage.
        self.initialize_path(self.filenames)
class TemporalCache(AlgorithmCache):
    """On-disk cache of per-image temporal variables (slots, angles, ...)."""
    def __init__(self, algorithm):
        super(TemporalCache, self).__init__(algorithm)
        self.update_cache(self.filenames)
        self.cache = Loader(pmap(self.get_cached_file, self.filenames),
                            tile_cut=self.tile_config)
        self.root = self.cache.root
    def initialize_path(self, filenames):
        # index maps each cached filename back to its source image file.
        self.path = '/'.join(filenames[0].split('/')[0:-1])
        self.temporal_path = self.algorithm.config['temporal_cache']
        self.index = {self.get_cached_file(v): v for v in filenames}
        if not os.path.exists(self.temporal_path):
            os.makedirs(self.temporal_path)
    def get_cached_file(self, filename):
        # The cached copy lives in the temporal dir under a shortened name.
        return '%s/%s' % (self.temporal_path, short(filename, None, None))
    def update_cache(self, filenames):
        # Drop stale entries first, then compute the missing ones.
        self.clean_cache(filenames)
        self.extend_cache(filenames)
    def extend_cache(self, filenames):
        """Compute and store temporal variables for not-yet-cached images."""
        cached_files = glob.glob('%s/*.nc' % self.temporal_path)
        not_cached = filter(lambda f: self.get_cached_file(f)
                            not in cached_files,
                            filenames)
        if not_cached:
            loader = Loader(not_cached, self.tile_config)
            new_files = pmap(self.get_cached_file, not_cached)
            with nc.loader(new_files, dimensions=self.tile_config) as cache:
                self.algorithm.update_temporalcache(loader, cache)
            loader.dump()
    def clean_cache(self, exceptions):
        """Remove cached files whose source image is no longer requested."""
        cached_files = glob.glob('%s/*.nc' % self.temporal_path)
        old_cache = filter(lambda f: self.index[f] not in exceptions,
                           cached_files)
        pmap(os.remove, old_cache)
    def getvar(self, *args, **kwargs):
        """Return (and memoize) a netCDF variable from the cache root."""
        name = args[0]
        if name not in self._attrs.keys():
            tmp = list(args)
            tmp.insert(0, self.cache.root)
            self._attrs[name] = nc.getvar(*tmp, **kwargs)
        return self._attrs[name]
class OutputCache(AlgorithmCache):
    """Output product files: cloud index and global radiation per image."""
    def __init__(self, algorithm):
        super(OutputCache, self).__init__(algorithm)
        self.output = Loader(pmap(self.get_output_file, self.filenames),
                             tile_cut=self.tile_config)
        self.root = self.output.root
        with nc.loader(self.filenames, dimensions=self.tile_config) as images:
            # NOTE(review): relies on eager map() side effects (Python 2);
            # under Python 3 this line would be a no-op - confirm version.
            map(algorithm.create_1px_dimensions, self.root.roots)
            self.root.getvar('time', source=images.getvar('time'))
            self.root.getvar('cloudindex',
                             'f4', source=images.getvar('data'))
            self.root.getvar('globalradiation',
                             'f4', source=images.getvar('data'))
    def initialize_path(self, filenames):
        # index maps each output filename back to its source image file.
        self.path = '/'.join(filenames[0].split('/')[0:-1])
        self.output_path = self.algorithm.config['product']
        self.index = {self.get_output_file(v): v for v in filenames}
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path)
    def get_output_file(self, filename):
        # Product file lives in the output dir under a shortened name.
        return '%s/%s' % (self.output_path, short(filename, None, None))
def run(**config):
    """Entry point: load the configured images and run Heliosat2 on them."""
    loader = Loader(config['data'], tile_cut=config['tile_cut'])
    algorithm = Heliosat2(config, geo.strategy)
    algorithm.run_with(loader)
    loader.dump()
| {
"content_hash": "d27ff2cd07a5dee18bc6da4083c2af25",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 40.00595238095238,
"alnum_prop": 0.6168724892129147,
"repo_name": "scottlittle/solar_radiation_model",
"id": "3f0ca3814d16a097ee4f74b31f4c167f1619f2bc",
"size": "6744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/heliosat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cuda",
"bytes": "10789"
},
{
"name": "Makefile",
"bytes": "3575"
},
{
"name": "Python",
"bytes": "62469"
}
],
"symlink_target": ""
} |
"""Support for providing temporary directories to test functions."""
import os
import re
import sys
import tempfile
from pathlib import Path
from shutil import rmtree
from typing import Dict
from typing import Generator
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from _pytest.nodes import Item
from _pytest.stash import StashKey
if TYPE_CHECKING:
from typing_extensions import Literal
RetentionType = Literal["all", "failed", "none"]
import attr
from _pytest.config.argparsing import Parser
from .pathlib import LOCK_TIMEOUT
from .pathlib import make_numbered_dir
from .pathlib import make_numbered_dir_with_cleanup
from .pathlib import rm_rf
from .pathlib import cleanup_dead_symlink
from _pytest.compat import final
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.deprecated import check_ispytest
from _pytest.fixtures import fixture
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
tmppath_result_key = StashKey[Dict[str, bool]]()
@final
@attr.s(init=False)
class TempPathFactory:
    """Factory for temporary directories under the common base temp directory.

    The base directory can be configured using the ``--basetemp`` option.
    """

    _given_basetemp = attr.ib(type=Optional[Path])
    _trace = attr.ib()
    _basetemp = attr.ib(type=Optional[Path])
    _retention_count = attr.ib(type=int)
    _retention_policy = attr.ib(type="RetentionType")

    def __init__(
        self,
        given_basetemp: Optional[Path],
        retention_count: int,
        retention_policy: "RetentionType",
        trace,
        basetemp: Optional[Path] = None,
        *,
        _ispytest: bool = False,
    ) -> None:
        check_ispytest(_ispytest)
        if given_basetemp is None:
            self._given_basetemp = None
        else:
            # Use os.path.abspath() to get absolute path instead of resolve() as it
            # does not work the same in all platforms (see #4427).
            # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
            self._given_basetemp = Path(os.path.abspath(str(given_basetemp)))
        self._trace = trace
        self._retention_count = retention_count
        self._retention_policy = retention_policy
        self._basetemp = basetemp

    @classmethod
    def from_config(
        cls,
        config: Config,
        *,
        _ispytest: bool = False,
    ) -> "TempPathFactory":
        """Create a factory according to pytest configuration.

        :meta private:
        """
        check_ispytest(_ispytest)
        count = int(config.getini("tmp_path_retention_count"))
        if count < 0:
            raise ValueError(
                f"tmp_path_retention_count must be >= 0. Current input: {count}."
            )
        policy = config.getini("tmp_path_retention_policy")
        if policy not in ("all", "failed", "none"):
            # Bug fix: user-facing message previously said "intput".
            raise ValueError(
                f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}."
            )
        return cls(
            given_basetemp=config.option.basetemp,
            trace=config.trace.get("tmpdir"),
            retention_count=count,
            retention_policy=policy,
            _ispytest=True,
        )

    def _ensure_relative_to_basetemp(self, basename: str) -> str:
        # Reject path traversal: after normalization the requested name must
        # resolve to a direct child of the base temp directory.
        basename = os.path.normpath(basename)
        if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
            raise ValueError(f"{basename} is not a normalized and relative path")
        return basename

    def mktemp(self, basename: str, numbered: bool = True) -> Path:
        """Create a new temporary directory managed by the factory.

        :param basename:
            Directory base name, must be a relative path.
        :param numbered:
            If ``True``, ensure the directory is unique by adding a numbered
            suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True``
            means that this function will create directories named ``"foo-0"``,
            ``"foo-1"``, ``"foo-2"`` and so on.
        :returns:
            The path to the new directory.
        """
        basename = self._ensure_relative_to_basetemp(basename)
        if not numbered:
            p = self.getbasetemp().joinpath(basename)
            p.mkdir(mode=0o700)
        else:
            p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700)
        self._trace("mktemp", p)
        return p

    def getbasetemp(self) -> Path:
        """Return the base temporary directory, creating it if needed.

        :returns:
            The base temporary directory.
        """
        if self._basetemp is not None:
            return self._basetemp
        if self._given_basetemp is not None:
            basetemp = self._given_basetemp
            if basetemp.exists():
                rm_rf(basetemp)
            basetemp.mkdir(mode=0o700)
            basetemp = basetemp.resolve()
        else:
            from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
            temproot = Path(from_env or tempfile.gettempdir()).resolve()
            user = get_user() or "unknown"
            # use a sub-directory in the temproot to speed-up
            # make_numbered_dir() call
            rootdir = temproot.joinpath(f"pytest-of-{user}")
            try:
                rootdir.mkdir(mode=0o700, exist_ok=True)
            except OSError:
                # getuser() likely returned illegal characters for the
                # platform; fall back to the "unknown" directory.
                rootdir = temproot.joinpath("pytest-of-unknown")
                rootdir.mkdir(mode=0o700, exist_ok=True)
            # Because we use exist_ok=True with a predictable name, make sure
            # we are the owners, to prevent any funny business (on unix, where
            # temproot is usually shared).
            # Also, to keep things private, fixup any world-readable temp
            # rootdir's permissions. Historically 0o755 was used, so we can't
            # just error out on this, at least for a while.
            if sys.platform != "win32":
                uid = os.getuid()
                rootdir_stat = rootdir.stat()
                # getuid shouldn't fail, but cpython defines such a case.
                # Let's hope for the best.
                if uid != -1:
                    if rootdir_stat.st_uid != uid:
                        raise OSError(
                            f"The temporary directory {rootdir} is not owned by the current user. "
                            "Fix this and try again."
                        )
                    if (rootdir_stat.st_mode & 0o077) != 0:
                        os.chmod(rootdir, rootdir_stat.st_mode & ~0o077)
            keep = self._retention_count
            if self._retention_policy == "none":
                keep = 0
            basetemp = make_numbered_dir_with_cleanup(
                prefix="pytest-",
                root=rootdir,
                keep=keep,
                lock_timeout=LOCK_TIMEOUT,
                mode=0o700,
            )
        assert basetemp is not None, basetemp
        self._basetemp = basetemp
        self._trace("new basetemp", basetemp)
        return basetemp
def get_user() -> Optional[str]:
    """Return the current user name, or ``None`` when it cannot be
    determined in this environment (see #1010)."""
    try:
        import getpass  # may be unimportable in exotic environments
    except ImportError:
        return None
    try:
        return getpass.getuser()
    except (ImportError, KeyError):
        return None
def pytest_configure(config: Config) -> None:
    """Create a TempPathFactory and attach it to the config object.

    This is to comply with existing plugins which expect the handler to be
    available at pytest_configure time, but ideally should be moved entirely
    to the tmp_path_factory session fixture.
    """
    mp = MonkeyPatch()
    config.add_cleanup(mp.undo)
    _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True)
    # raising=False: the attribute does not exist on Config yet.
    mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False)
def pytest_addoption(parser: Parser) -> None:
    """Register the tmp_path retention ini options."""
    parser.addini(
        "tmp_path_retention_count",
        help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.",
        default=3,
    )
    parser.addini(
        "tmp_path_retention_policy",
        help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. "
        "(all/failed/none)",
        default="failed",
    )
@fixture(scope="session")
def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
    """Return a :class:`pytest.TempPathFactory` instance for the test session."""
    # Set dynamically by pytest_configure() above; the ignore is needed
    # because Config declares no such attribute.
    return request.config._tmp_path_factory  # type: ignore
def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
    """Create a numbered temp dir named after the requesting test item."""
    MAXVAL = 30
    # Sanitize the node name so it is always a valid directory name, and
    # truncate it to keep paths short.
    sanitized = re.sub(r"[\W]", "_", request.node.name)[:MAXVAL]
    return factory.mktemp(sanitized, numbered=True)
@fixture
def tmp_path(
    request: FixtureRequest, tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """Return a temporary directory path object which is unique to each test
    function invocation, created as a sub directory of the base temporary
    directory.
    By default, a new base temporary directory is created each test session,
    and old bases are removed after 3 sessions, to aid in debugging. If
    ``--basetemp`` is used then it is cleared each session. See :ref:`base
    temporary directory`.
    The returned object is a :class:`pathlib.Path` object.
    """
    path = _mk_tmp(request, tmp_path_factory)
    yield path
    # Remove the tmpdir if the policy is "failed" and the test passed.
    tmp_path_factory: TempPathFactory = request.session.config._tmp_path_factory  # type: ignore
    policy = tmp_path_factory._retention_policy
    # Phase outcomes are recorded by pytest_runtest_makereport below; a
    # missing "call" entry (e.g. setup failed) is treated as passed here.
    result_dict = request.node.stash[tmppath_result_key]
    if policy == "failed" and result_dict.get("call", True):
        # We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
        # permissions, etc, in which case we ignore it.
        rmtree(path, ignore_errors=True)
    del request.node.stash[tmppath_result_key]
    # remove dead symlink
    basetemp = tmp_path_factory._basetemp
    if basetemp is None:
        return
    cleanup_dead_symlink(basetemp)
def pytest_sessionfinish(session, exitstatus: Union[int, ExitCode]):
    """After each session, remove base directory if all the tests passed,
    the policy is "failed", and the basetemp is not specified by a user.
    """
    tmp_path_factory: TempPathFactory = session.config._tmp_path_factory
    # Nothing was created this session; nothing to clean up.
    if tmp_path_factory._basetemp is None:
        return
    policy = tmp_path_factory._retention_policy
    if (
        exitstatus == 0
        and policy == "failed"
        and tmp_path_factory._given_basetemp is None
    ):
        passed_dir = tmp_path_factory._basetemp
        if passed_dir.exists():
            # We do a "best effort" to remove files, but it might not be possible due to some leaked resource,
            # permissions, etc, in which case we ignore it.
            rmtree(passed_dir, ignore_errors=True)
@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call):
    """Record each test phase's pass/fail outcome in the item's stash.

    The tmp_path fixture teardown consults this record to decide whether
    the per-test directory should be kept or removed.
    """
    outcome = yield
    result = outcome.get_result()
    if tmppath_result_key not in item.stash:
        item.stash[tmppath_result_key] = {result.when: result.passed}
    else:
        item.stash[tmppath_result_key][result.when] = result.passed
| {
"content_hash": "9d5e01ea546a21982f3de654934214d8",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 118,
"avg_line_length": 36.33435582822086,
"alnum_prop": 0.6268467707893626,
"repo_name": "pytest-dev/pytest",
"id": "3fd8168b6d28570f7aa6cab89ba2df35e014ecee",
"size": "11845",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/_pytest/tmpdir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2748374"
}
],
"symlink_target": ""
} |
from AccessControl import getSecurityManager
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.CMFCore.utils import getToolByName
from bika.lims.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.utils import isActive
from operator import itemgetter
from bika.lims.browser.analyses import AnalysesView
from plone.app.layout.globals.interfaces import IViewView
from zope.component import getMultiAdapter
from zope.interface import implements
import json, plone
from operator import itemgetter
class ViewView(BrowserView):
    """Default view of a single Reference Sample.

    Groups the sample's reference results by analysis service category
    before rendering the page template.
    """
    implements(IViewView)
    template = ViewPageTemplateFile("templates/referencesample_view.pt")

    def __init__(self, context, request):
        BrowserView.__init__(self, context, request)
        self.icon = self.portal_url + \
            "/++resource++bika.lims.images/referencesample_big.png"

    def __call__(self):
        reference_catalog = getToolByName(self.context, REFERENCE_CATALOG)
        # Map each category title to the list of its result dicts.
        self.results = {}
        for result in self.context.getReferenceResults():
            service = reference_catalog.lookupObject(result['uid'])
            result['service'] = service
            bucket = self.results.setdefault(service.getCategoryTitle(), [])
            bucket.append(result)
        # Sorted list of category titles, for ordered template iteration.
        self.categories = sorted(self.results.keys())
        return self.template()
class ReferenceAnalysesViewView(BrowserView):
    """View listing the Reference Analyses linked to a Reference Sample."""
    implements(IViewView)
    template = ViewPageTemplateFile("templates/referencesample_analyses.pt")

    def __init__(self, context, request):
        super(ReferenceAnalysesViewView, self).__init__(context, request)
        self.icon = self.portal_url + \
            "/++resource++bika.lims.images/referencesample_big.png"
        self.title = self.context.translate(_("Reference Analyses"))
        self.description = ""
        # Lazily-built listing view; see get_analyses_view().
        self._analysesview = None

    def __call__(self):
        return self.template()

    def get_analyses_table(self):
        """Render the analyses listing as an HTML table."""
        return self.get_analyses_view().contents_table()

    def get_analyses_view(self):
        """Return the cached ReferenceAnalysesView, creating it on first use."""
        if not self._analysesview:
            view = ReferenceAnalysesView(self.context, self.request)
            # The listing is read-only: no editing, selection or workflow
            # action buttons, and no transitions in the default state.
            view.allow_edit = False
            view.show_select_column = False
            view.show_workflow_action_buttons = False
            view.form_id = "%s_qcanalyses" % self.context.UID()
            view.review_states[0]['transitions'] = [{}]
            self._analysesview = view
        return self._analysesview

    def getReferenceSampleId(self):
        """Return the id of the reference sample being viewed."""
        return self.context.id

    def get_analyses_json(self):
        """Return the chart data of the analyses as a JSON string."""
        return self.get_analyses_view().get_analyses_json()
class ReferenceAnalysesView(AnalysesView):
    """Listing of the Reference Analyses performed on this Reference Sample.

    Besides rendering the listing, ``folderitems`` accumulates ``self.anjson``,
    a nested mapping {service reference: {QC sample id: [chart row dicts]}}
    which ``get_analyses_json`` serializes for the QC charts.
    """
    implements(IViewView)

    def __init__(self, context, request):
        AnalysesView.__init__(self, context, request)
        self.catalog = 'bika_analysis_catalog'
        # Only reference analyses located inside this sample.
        self.contentFilter = {
            'portal_type': 'ReferenceAnalysis',
            'path': {'query': "/".join(self.context.getPhysicalPath()),
                     'level': 0}}
        self.show_select_row = False
        self.show_sort_column = False
        self.show_select_column = False
        self.allow_edit = False
        self.columns = {
            'id': {'title': _('ID'), 'toggle': False},
            'getReferenceAnalysesGroupID': {'title': _('QC Sample ID'),
                                            'toggle': True},
            'Category': {'title': _('Category'), 'toggle': True},
            'Service': {'title': _('Service'), 'toggle': True},
            'Worksheet': {'title': _('Worksheet'), 'toggle': True},
            'Method': {
                'title': _('Method'),
                'sortable': False,
                'toggle': True},
            'Instrument': {
                'title': _('Instrument'),
                'sortable': False,
                'toggle': True},
            'Result': {'title': _('Result'), 'toggle': True},
            'Captured': {'title': _('Captured'), 'toggle': True},
            'Uncertainty': {'title': _('+-'), 'toggle': True},
            'DueDate': {'title': _('Due Date'),
                        'index': 'getDueDate',
                        'toggle': True},
            'retested': {'title': _('Retested'), 'type': 'boolean',
                         'toggle': True},
            'state_title': {'title': _('State'), 'toggle': True},
        }
        self.review_states = [
            {'id': 'default',
             'title': _('All'),
             'contentFilter': {},
             'transitions': [],
             'columns': ['id',
                         'getReferenceAnalysesGroupID',
                         'Category',
                         'Service',
                         'Worksheet',
                         'Method',
                         'Instrument',
                         'Result',
                         'Captured',
                         'Uncertainty',
                         'DueDate',
                         'state_title'],
             },
        ]
        # Chart data accumulated by folderitems(); see class docstring.
        self.anjson = {}

    def folderitems(self):
        """Return displayable items (newest capture first) and build anjson."""
        items = super(ReferenceAnalysesView, self).folderitems()
        items.sort(key=itemgetter('CaptureDate'), reverse=True)
        outitems = []
        for item in items:
            # Skip entries without a real object and analyses with no result.
            # ('has_key' was Python-2-only; the 'in' operator is equivalent.)
            if 'obj' not in item or item['Result'] == '':
                continue
            obj = item['obj']
            service = obj.getService()
            item['id'] = obj.getId()
            item['Category'] = service.getCategoryTitle()
            item['Service'] = service.Title()
            item['Captured'] = self.ulocalized_time(obj.getResultCaptureDate())
            brefs = obj.getBackReferences("WorksheetAnalysis")
            item['Worksheet'] = brefs and brefs[0].Title() or ''
            self._append_chart_row(item, obj, service)
            outitems.append(item)
        return outitems

    def _append_chart_row(self, item, obj, service):
        """Add one chart row for this analysis to self.anjson, when the
        parent reference sample defines a results range for its service.
        """
        rr = obj.aq_parent.getResultsRangeDict()
        uid = service.UID()
        if uid not in rr:
            return
        specs = rr[uid]
        serviceref = "%s (%s)" % (item['Service'], item['Keyword'])
        qcid = obj.aq_parent.id
        try:
            smin = float(specs.get('min', 0))
            smax = float(specs.get('max', 0))
            error = float(specs.get('error', 0))
            target = float(specs.get('result', 0))
            result = float(item['Result'])
            error_amount = ((target / 100) * error) if target > 0 else 0
            anrow = {'date': item['CaptureDate'],
                     'min': smin,
                     'max': smax,
                     'target': target,
                     'error': error,
                     'erroramount': error_amount,
                     'upper': smax + error_amount,
                     'lower': smin - error_amount,
                     'result': result,
                     'unit': item['Unit'],
                     'id': item['uid']}
        except (KeyError, TypeError, ValueError):
            # Narrowed from a bare 'except:' that hid real errors: only a
            # missing key or a non-numeric spec/result skips the chart row.
            return
        # Create the nested containers only on success, so services without
        # chartable rows never appear in the JSON payload (matches the
        # original behavior of assigning into anjson at the end of the try).
        self.anjson.setdefault(serviceref, {}).setdefault(qcid, []).append(anrow)

    def get_analyses_json(self):
        """Return the accumulated chart data serialized as JSON."""
        return json.dumps(self.anjson)
class ReferenceResultsView(BikaListingView):
    """Listing of the expected result ranges defined on a Reference Sample."""

    def __init__(self, context, request):
        super(ReferenceResultsView, self).__init__(context, request)
        bsc = getToolByName(context, 'bika_setup_catalog')
        self.title = self.context.translate(_("Reference Values"))
        self.description = self.context.translate(_(
            "Click on Analysis Categories (against shaded background) "
            "to see Analysis Services in each category. Enter minimum "
            "and maximum values to indicate a valid results range. "
            "Any result outside this range will raise an alert. "
            "The % Error field allows for an % uncertainty to be "
            "considered when evaluating results against minimum and "
            "maximum values. A result out of range but still in range "
            "if the % error is taken into consideration, will raise a "
            "less severe alert."))
        self.contentFilter = {}
        self.context_actions = {}
        # Plain read-only table: no sorting, selection or workflow buttons,
        # and effectively no pagination.
        self.show_sort_column = False
        self.show_select_row = False
        self.show_workflow_action_buttons = False
        self.show_select_column = False
        self.pagesize = 999999
        self.columns = {
            'Service': {'title': _('Service')},
            'result': {'title': _('Result')},
            'min': {'title': _('Min')},
            'max': {'title': _('Max')},
        }
        self.review_states = [
            {'id': 'default',
             'title': _('All'),
             'contentFilter': {},
             'columns': ['Service',
                         'result',
                         'min',
                         'max']},
        ]

    def folderitems(self):
        """Build one pseudo folder item per reference result, sorted by
        service title (items come from the context, not from a catalog query).
        """
        uid_catalog = getToolByName(self.context, 'uid_catalog')
        items = []
        for ref in self.context.getReferenceResults():
            service = uid_catalog(UID=ref['uid'])[0].getObject()
            service_link = "<a href='%s'>%s</a>" % (service.absolute_url(),
                                                    service.Title())
            items.append({
                'obj': self.context,
                'id': ref['uid'],
                'uid': ref['uid'],
                'result': ref['result'],
                'min': ref['min'],
                'max': ref['max'],
                'title': service.Title(),
                'Service': service.Title(),
                'type_class': 'contenttype-ReferenceResult',
                'url': service.absolute_url(),
                'relative_url': service.absolute_url(),
                'view_url': self.context.absolute_url() + "/results",
                'replace': {'Service': service_link},
                'before': {},
                'after': {},
                'choices': {},
                'class': {},
                'state_class': 'state-active',
                'allow_edit': [],
            })
        return sorted(items, key=itemgetter('Service'))
class ReferenceSamplesView(BikaListingView):
    """Main reference samples folder view.

    Lists all ReferenceSample objects, auto-expiring any 'current' sample
    whose expiry date has passed while the listing is being built.
    """

    def __init__(self, context, request):
        super(ReferenceSamplesView, self).__init__(context, request)
        self.icon = self.portal_url + \
            "/++resource++bika.lims.images/referencesample_big.png"
        self.title = self.context.translate(_("Reference Samples"))
        self.description = self.context.translate(
            _("All reference samples in the system are displayed here."))
        self.catalog = 'bika_catalog'
        self.contentFilter = {'portal_type': 'ReferenceSample',
                              'sort_on': 'id',
                              'sort_order': 'reverse',
                              'path': {"query": ["/"], "level": 0}, }
        self.context_actions = {}
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = True
        self.pagesize = 50
        request.set('disable_border', 1)
        self.columns = {
            'ID': {
                'title': _('ID'),
                'index': 'id'},
            'Title': {
                'title': _('Title'),
                'index': 'sortable_title',
                'toggle': True},
            'Supplier': {
                'title': _('Supplier'),
                'toggle': True},
            'Definition': {
                'title': _('Reference Definition'),
                'toggle': True},
            'DateSampled': {
                'title': _('Date Sampled'),
                'index': 'getDateSampled',
                'toggle': True},
            'DateReceived': {
                'title': _('Date Received'),
                'index': 'getDateReceived',
                'toggle': True},
            'ExpiryDate': {
                'title': _('Expiry Date'),
                'index': 'getExpiryDate',
                'toggle': True},
            'state_title': {
                'title': _('State'),
                'toggle': True},
        }
        # Every workflow tab shows the same columns; 'All' adds the state.
        base_columns = ['ID', 'Title', 'Supplier', 'Definition',
                        'DateSampled', 'DateReceived', 'ExpiryDate']
        self.review_states = [
            {'id': 'default',
             'title': _('Current'),
             'contentFilter': {'review_state': 'current'},
             'columns': base_columns[:]},
            {'id': 'expired',
             'title': _('Expired'),
             'contentFilter': {'review_state': 'expired'},
             'columns': base_columns[:]},
            {'id': 'disposed',
             'title': _('Disposed'),
             'contentFilter': {'review_state': 'disposed'},
             'columns': base_columns[:]},
            {'id': 'all',
             'title': _('All'),
             'contentFilter': {},
             'columns': base_columns + ['state_title']},
        ]

    def folderitems(self):
        """Return the listing items, expiring out-of-date 'current' samples
        on the fly and decorating supplier/definition/ID cells with links.
        """
        # Hoisted out of the loop body: importing per-iteration was wasteful.
        from Products.ATContentTypes.utils import DT2dt
        from datetime import datetime
        items = super(ReferenceSamplesView, self).folderitems()
        workflow = getToolByName(self.context, 'portal_workflow')
        outitems = []
        for item in items:
            # 'has_key' was Python-2-only; the 'in' operator is equivalent.
            if 'obj' not in item:
                continue
            obj = item['obj']
            if workflow.getInfoFor(obj, 'review_state') == 'current':
                # Auto-expire samples whose expiry date has passed.
                expirydate = DT2dt(obj.getExpiryDate()).replace(tzinfo=None)
                if datetime.today() > expirydate:
                    workflow.doActionFor(obj, 'expire')
                    item['review_state'] = 'expired'
                    item['obj'] = obj
                    if self.contentFilter.get('review_state') == 'current':
                        # This tab shows only current samples: drop it now.
                        continue
            item['ID'] = obj.id
            item['replace']['Supplier'] = "<a href='%s'>%s</a>" % (
                obj.aq_parent.absolute_url(), obj.aq_parent.Title())
            definition = obj.getReferenceDefinition()
            if definition:
                item['replace']['Definition'] = "<a href='%s'>%s</a>" % (
                    definition.absolute_url(), definition.Title())
            else:
                item['Definition'] = ' '
            item['DateSampled'] = self.ulocalized_time(obj.getDateSampled())
            item['DateReceived'] = self.ulocalized_time(obj.getDateReceived())
            item['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())
            after_icons = ''
            if obj.getBlank():
                after_icons += "<img src='++resource++bika.lims.images/blank.png' title='Blank'>"
            if obj.getHazardous():
                after_icons += "<img src='++resource++bika.lims.images/hazardous.png' title='Hazardous'>"
            item['replace']['ID'] = "<a href='%s'>%s</a> %s" % (
                item['url'], item['ID'], after_icons)
            outitems.append(item)
        return outitems
| {
"content_hash": "ae78cfdc591aec91c0105a6d037d8871",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 111,
"avg_line_length": 41.262650602409636,
"alnum_prop": 0.4842910534921747,
"repo_name": "hocinebendou/bika.gsoc",
"id": "cdfabacd409fb5968035238b86f7c010010ce978",
"size": "17124",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bika/lims/browser/referencesample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routes for the test project: only the Django admin is exposed.
# NOTE(review): ``patterns('')`` is the pre-Django-1.10 style; modern Django
# uses a plain list of ``url()``/``path()`` entries.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'django_python3_ldap_test.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "c79e0cff0ea38ed79759739bb7c5ffca",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 29.3,
"alnum_prop": 0.6518771331058021,
"repo_name": "sol33t/django-python3-ldap",
"id": "03a0a44c0e1eaeba164794dc905dce053d9dc673",
"size": "293",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/django_python3_ldap_test/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24743"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisWildwestdomainsComStatusRegistered(object):
    """Parser tests for a registered-domain response from
    whois.wildwestdomains.com, driven by a recorded fixture file.
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.wildwestdomains.com/status_registered.txt"
        host = "whois.wildwestdomains.com"
        # Close the fixture file promptly instead of leaking the handle.
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record = yawhois.record.Record(None, [part])

    def _assert_wwd_contact(self, contact, contact_type, phone, fax, email):
        """Check the fields shared by the admin/registrant/technical contacts;
        only the type and phone/fax/email vary between them.
        """
        eq_(contact.__class__.__name__, 'Contact')
        eq_(contact.type, contact_type)
        eq_(contact.id, None)
        eq_(contact.name, "Wild West Domains Wild West Domains")
        eq_(contact.organization, "Wild West Domains")
        eq_(contact.address, "14455 N Hayden Rd Suite 219")
        eq_(contact.city, "Scottsdale")
        eq_(contact.zip, "85260")
        eq_(contact.state, "Arizona")
        eq_(contact.country, None)
        eq_(contact.country_code, "United States")
        eq_(contact.phone, phone)
        eq_(contact.fax, fax)
        eq_(contact.email, email)
        eq_(contact.created_on, None)
        eq_(contact.updated_on, None)

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_domain(self):
        eq_(self.record.domain, "wildwestdomains.com")

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 3)
        expected = ("cns1.secureserver.net",
                    "cns2.secureserver.net",
                    "cns3.secureserver.net")
        for nameserver, name in zip(self.record.nameservers, expected):
            eq_(nameserver.__class__.__name__, 'Nameserver')
            eq_(nameserver.name, name)

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(len(self.record.admin_contacts), 1)
        self._assert_wwd_contact(
            self.record.admin_contacts[0],
            yawhois.record.Contact.TYPE_ADMINISTRATIVE,
            "+1.4805058800", "+1.4805058844", "dns@wildwestdomains.com")

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('2000-08-22 18:29:11'))

    def test_registrar(self):
        eq_(self.record.registrar.__class__.__name__, 'Registrar')
        eq_(self.record.registrar.id, None)
        eq_(self.record.registrar.name, "Wild West Domains, LLC")
        eq_(self.record.registrar.organization, "Wild West Domains, LLC")
        eq_(self.record.registrar.url, "http://www.wildwestdomains.com")

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(len(self.record.registrant_contacts), 1)
        self._assert_wwd_contact(
            self.record.registrant_contacts[0],
            yawhois.record.Contact.TYPE_REGISTRANT,
            None, None, None)

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(len(self.record.technical_contacts), 1)
        self._assert_wwd_contact(
            self.record.technical_contacts[0],
            yawhois.record.Contact.TYPE_TECHNICAL,
            "+1.4805058800", "+1.4805058844", "dns@wildwestdomains.com")

    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2011-11-01 16:31:47'))

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2021-11-01 06:59:59'))
| {
"content_hash": "f9451c8ee3065cea9e8556c92f8de7ad",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 96,
"avg_line_length": 53.3304347826087,
"alnum_prop": 0.6536768302625142,
"repo_name": "huyphan/pyyawhois",
"id": "9b2363b5dfaabc77ab32b1d91f176ef1bf2885b2",
"size": "6407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_wildwestdomains_com_status_registered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from gensim.models.phrases import Phrases
from textblob import TextBlob
# from gensim: threshold represents a threshold for forming the phrases (higher means fewer phrases).
# A phrase of words a and b is accepted if (cnt(a, b) - min_count) * N / (cnt(a) * cnt(b)) > threshold, where N is the total vocabulary size.
thresh = 10
# n = 5
if __name__ == '__main__':
desc = []
doc_id = []
dataset = json.load(open('data/epa.json'))['dataset']
print 'Tokenizing descriptions'
for i, ds in enumerate(dataset):
ds['source'] = 'epa.gov/data.json'
text = TextBlob(ds['description'])
for sentence in text.sentences:
desc.append(sentence.tokens)
doc_id.append(i)
desc_nasa = []
nasa_data = json.load(open('data/nasa.json'))['dataset']
print 'Tokenizing NASA descriptions'
for i, ds in enumerate(nasa_data):
text = TextBlob(ds['description'])
for sentence in text.sentences:
desc_nasa.append(sentence.tokens)
print 'Constructing ngrams'
print 'Bigrams'
# desc_bigrams = Phrases(desc, threshold=thresh)
desc_bigrams = Phrases(desc + desc_nasa, threshold=thresh)
bigrams = desc_bigrams[desc]
print 'Trigrams'
desc_trigrams = Phrases(bigrams, threshold=thresh)
trigrams = desc_trigrams[bigrams]
print 'Fourgrams'
desc_fourgrams = Phrases(trigrams, threshold=thresh)
fourgrams = desc_fourgrams[trigrams]
print 'Fivegrams'
desc_fivegrams = Phrases(fourgrams, threshold=thresh)
fivegrams = desc_fivegrams[fourgrams]
# pull out keywords
print 'Extracting keywords'
field = 'description_ngram_np'
for i, ngram in enumerate(fivegrams):
doc = doc_id[i]
if field not in dataset[doc]:
dataset[doc][field] = set()
if doc > 0 and doc % 1000 == 0:
print '\t', doc
for kw in filter(lambda k: '_' in k, ngram):
keyword = kw.replace('_', ' ')
kw_tb = TextBlob(keyword)
# filter out punctuation, etc (make sure that there are two non-punc words)
if len(kw_tb.words) < 2:
continue
# add keywords which are all proper nouns
distinct_tags = set(t[1] for t in kw_tb.tags)
if distinct_tags - {'NNP', 'NNPS'} == {}:
dataset[doc][field].add(kw_tb.lower())
continue
# add noun phrases
for np in kw_tb.lower().noun_phrases:
dataset[doc][field].add(np)
# convert set into list for json serialization
for d in dataset:
d[field] = list(d[field])
# fix 's
for i, np in enumerate(d[field]):
if np.endswith(" 's"):
np = np[:-3]
d[field][i] = np.replace(" 's", "'s")
d[field] = list(set(d[field]))
with open('data/epa_ngram_np.json', 'w') as f:
json.dump(dataset, f) | {
"content_hash": "b4e5aef8fa608d848190089243b2211d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 141,
"avg_line_length": 31.302083333333332,
"alnum_prop": 0.5876871880199668,
"repo_name": "jonroberts/nasaMining",
"id": "a5822c5b64c619a42d7183ccb76e8c48aca54e0e",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bak/epa_ngrams_np.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "116224"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Python",
"bytes": "88008"
}
],
"symlink_target": ""
} |
"""
Stub functions that are used by the Amazon Rekognition unit tests.
"""
from test_tools.example_stubber import ExampleStubber
class RekognitionStubber(ExampleStubber):
    """
    A class that implements stub functions used by Amazon Rekognition unit tests.
    The stubbed functions expect certain parameters to be passed to them as
    part of the tests, and raise errors if the parameters are not as expected.
    """
    def __init__(self, client, use_stubs=True):
        """
        Initializes the object with a specific client and configures it for
        stubbing or AWS passthrough.
        :param client: A Boto3 Rekognition client.
        :param use_stubs: When True, use stubs to intercept requests. Otherwise,
                          pass requests through to AWS.
        """
        super().__init__(client, use_stubs)

    @staticmethod
    def _face_to_dict(face):
        """Convert a test face object into the dict shape Rekognition returns.

        Optional attributes are included only when set on the test object;
        when age_range is set, the remaining 'ALL'-attribute fields
        (smile, eyeglasses, emotions, etc.) are emitted as well.
        """
        face_dict = {}
        if face.bounding_box is not None:
            face_dict['BoundingBox'] = face.bounding_box
        if face.confidence is not None:
            face_dict['Confidence'] = face.confidence
        if face.landmarks is not None:
            face_dict['Landmarks'] = face.landmarks
        if face.pose is not None:
            face_dict['Pose'] = face.pose
        if face.quality is not None:
            face_dict['Quality'] = face.quality
        if face.age_range is not None:
            face_dict.update({
                'AgeRange': {'Low': face.age_range[0], 'High': face.age_range[1]},
                'Smile': {'Value': face.smile},
                'Eyeglasses': {'Value': face.eyeglasses},
                'Sunglasses': {'Value': face.sunglasses},
                'Gender': {'Value': face.gender},
                'Beard': {'Value': face.beard},
                'Mustache': {'Value': face.mustache},
                'EyesOpen': {'Value': face.eyes_open},
                'MouthOpen': {'Value': face.mouth_open},
                # Confidence is fixed at 80 for every stubbed emotion.
                'Emotions': [{'Type': emotion, 'Confidence': 80}
                             for emotion in face.emotions]})
        return face_dict

    @staticmethod
    def _celebrity_to_dict(celebrity):
        """Convert a test celebrity object into a CelebrityFaces entry dict."""
        return {
            'Urls': celebrity.info_urls,
            'Name': celebrity.name,
            'Id': celebrity.id,
            'Face': RekognitionStubber._face_to_dict(celebrity.face)}

    @staticmethod
    def _person_to_dict(person):
        """Convert a test person object into a person-tracking entry dict."""
        return {
            'Index': person.index,
            'Face': RekognitionStubber._face_to_dict(person.face)}

    @staticmethod
    def _label_to_dict(label):
        """Convert a test label object into a Labels entry dict."""
        return {
            'Name': label.name,
            'Confidence': label.confidence,
            'Instances': label.instances,
            'Parents': label.parents
        }

    @staticmethod
    def _moderation_label_to_dict(label):
        """Convert a test moderation label into a ModerationLabels entry dict."""
        return {
            'Name': label.name,
            'Confidence': label.confidence,
            'ParentName': label.parent_name
        }

    @staticmethod
    def _text_to_dict(text):
        """Convert a test text object into a TextDetections entry dict."""
        return {
            'DetectedText': text.text,
            'Type': text.kind,
            'Id': text.id,
            'ParentId': text.parent_id,
            'Confidence': text.confidence,
            'Geometry': text.geometry
        }

    def stub_detect_faces(self, image, faces, error_code=None):
        """Stub detect_faces: expect `image`, respond with `faces` details."""
        expected_params = {'Image': image, 'Attributes': ['ALL']}
        response = {'FaceDetails': [self._face_to_dict(face) for face in faces]}
        self._stub_bifurcator(
            'detect_faces', expected_params, response, error_code=error_code)

    def stub_compare_faces(
            self, source_image, target_image, similarity, matches, unmatches,
            error_code=None):
        """Stub compare_faces: respond with matched and unmatched face lists."""
        expected_params = {
            'SourceImage': source_image,
            'TargetImage': target_image,
            'SimilarityThreshold': similarity}
        response = {
            'FaceMatches': [{
                'Similarity': similarity,
                'Face': self._face_to_dict(match)
            } for match in matches],
            'UnmatchedFaces': [self._face_to_dict(unmatch) for unmatch in unmatches]}
        self._stub_bifurcator(
            'compare_faces', expected_params, response, error_code=error_code)

    def stub_detect_labels(
            self, image, max_labels, labels, error_code=None):
        """Stub detect_labels; `image` and `max_labels` are optional params."""
        expected_params = {}
        if image is not None:
            expected_params['Image'] = image
        if max_labels is not None:
            expected_params['MaxLabels'] = max_labels
        response = {'Labels': [self._label_to_dict(label) for label in labels]}
        self._stub_bifurcator(
            'detect_labels', expected_params, response, error_code=error_code)

    def stub_detect_moderation_labels(self, image, labels, error_code=None):
        """Stub detect_moderation_labels: respond with `labels`."""
        expected_params = {'Image': image}
        response = {
            'ModerationLabels': [
                self._moderation_label_to_dict(label) for label in labels]}
        self._stub_bifurcator(
            'detect_moderation_labels', expected_params, response,
            error_code=error_code)

    def stub_detect_text(self, image, texts, error_code=None):
        """Stub detect_text: respond with `texts` as TextDetections."""
        expected_params = {'Image': image}
        response = {'TextDetections': [self._text_to_dict(text) for text in texts]}
        self._stub_bifurcator(
            'detect_text', expected_params, response, error_code=error_code)

    def stub_recognize_celebrities(self, image, celebrities, normals, error_code=None):
        """Stub recognize_celebrities: `normals` are the unrecognized faces."""
        expected_params = {'Image': image}
        response = {
            'CelebrityFaces': [
                self._celebrity_to_dict(celeb) for celeb in celebrities],
            'UnrecognizedFaces': [self._face_to_dict(face) for face in normals]}
        self._stub_bifurcator(
            'recognize_celebrities', expected_params, response, error_code=error_code)

    def stub_describe_collection(self, collection_id, collection, error_code=None):
        """Stub describe_collection: respond with `collection`'s metadata."""
        expected_params = {'CollectionId': collection_id}
        response = {
            'CollectionARN': collection.collection_arn,
            'FaceCount': collection.face_count,
            'CreationTimestamp': collection.created
        }
        self._stub_bifurcator(
            'describe_collection', expected_params, response, error_code=error_code)

    def stub_delete_collection(self, collection_id, error_code=None):
        """Stub delete_collection for `collection_id` (empty response)."""
        expected_params = {'CollectionId': collection_id}
        self._stub_bifurcator(
            'delete_collection', expected_params, error_code=error_code)

    def stub_index_faces(
            self, collection_id, image, max_faces, indexed_faces, unindexed_faces,
            error_code=None):
        """Stub index_faces: respond with indexed and unindexed face records."""
        expected_params = {
            'CollectionId': collection_id, 'Image': image.image,
            'ExternalImageId': image.image_name, 'MaxFaces': max_faces,
            'DetectionAttributes': ['ALL']}
        response = {
            'FaceRecords': [{
                'Face': {'FaceId': face.face_id, 'ImageId': face.image_id},
                'FaceDetail': self._face_to_dict(face)
            } for face in indexed_faces],
            'UnindexedFaces': [{
                'FaceDetail': self._face_to_dict(face)
            }for face in unindexed_faces]}
        self._stub_bifurcator(
            'index_faces', expected_params, response, error_code=error_code)

    def stub_list_faces(self, collection_id, max_results, faces, error_code=None):
        """Stub list_faces: respond with `faces` from the collection."""
        expected_params = {'CollectionId': collection_id, 'MaxResults': max_results}
        response = {'Faces': [self._face_to_dict(face) for face in faces]}
        self._stub_bifurcator(
            'list_faces', expected_params, response, error_code=error_code)

    def stub_search_faces_by_image(
            self, collection_id, image, threshold, max_faces, image_face,
            collection_faces, error_code=None):
        """Stub search_faces_by_image: `image_face` is the face searched for,
        `collection_faces` are the matches found in the collection."""
        expected_params = {
            'CollectionId': collection_id, 'Image': image.image,
            'FaceMatchThreshold': threshold, 'MaxFaces': max_faces}
        response = {
            'SearchedFaceBoundingBox': image_face.bounding_box,
            'SearchedFaceConfidence': image_face.confidence,
            'FaceMatches': [
                {'Face': self._face_to_dict(face)} for face in collection_faces]}
        self._stub_bifurcator(
            'search_faces_by_image', expected_params, response, error_code=error_code)

    def stub_search_faces(
            self, collection_id, face_id, threshold, max_faces, faces, error_code=None):
        """Stub search_faces: respond with `faces` matching `face_id`."""
        expected_params = {
            'CollectionId': collection_id, 'FaceId': face_id,
            'FaceMatchThreshold': threshold, 'MaxFaces': max_faces}
        response = {
            'FaceMatches': [{'Face': self._face_to_dict(face)} for face in faces]}
        self._stub_bifurcator(
            'search_faces', expected_params, response, error_code=error_code)

    def stub_delete_faces(self, collection_id, face_ids, error_code=None):
        """Stub delete_faces: respond that all `face_ids` were deleted."""
        expected_params = {'CollectionId': collection_id, 'FaceIds': face_ids}
        response = {'DeletedFaces': face_ids}
        self._stub_bifurcator(
            'delete_faces', expected_params, response, error_code=error_code)

    def stub_create_collection(self, collection_id, collection, error_code=None):
        """Stub create_collection: respond with the new collection's ARN."""
        expected_params = {'CollectionId': collection_id}
        response = {
            'CollectionArn': collection.collection_arn
        }
        self._stub_bifurcator(
            'create_collection', expected_params, response, error_code=error_code)

    def stub_list_collections(self, max_results, collection_ids, error_code=None):
        """Stub list_collections: respond with `collection_ids`."""
        expected_params = {'MaxResults': max_results}
        response = {'CollectionIds': collection_ids}
        self._stub_bifurcator(
            'list_collections', expected_params, response, error_code=error_code)

    def stub_start_detection(
            self, func_name, video, notification_channel, job_id, error_code=None):
        """Stub any start_* video detection call named by `func_name`."""
        expected_params = {'Video': video, 'NotificationChannel': notification_channel}
        response = {'JobId': job_id}
        self._stub_bifurcator(
            func_name, expected_params, response, error_code=error_code)

    def stub_get_label_detection(
            self, job_id, job_status, labels, error_code=None):
        """Stub get_label_detection: respond with timestamped `labels`."""
        expected_params = {'JobId': job_id}
        response = {'JobStatus': job_status, 'Labels': [{
            'Timestamp': label.timestamp,
            'Label': self._label_to_dict(label)} for label in labels]}
        self._stub_bifurcator(
            'get_label_detection', expected_params, response, error_code=error_code)

    def stub_get_face_detection(
            self, job_id, job_status, faces, error_code=None):
        """Stub get_face_detection: respond with timestamped `faces`."""
        expected_params = {'JobId': job_id}
        response = {'JobStatus': job_status, 'Faces': [{
            'Timestamp': face.timestamp,
            'Face': self._face_to_dict(face)} for face in faces]}
        self._stub_bifurcator(
            'get_face_detection', expected_params, response, error_code=error_code)

    def stub_get_person_tracking(
            self, job_id, job_status, persons, error_code=None):
        """Stub get_person_tracking: respond with timestamped `persons`."""
        expected_params = {'JobId': job_id}
        response = {'JobStatus': job_status, 'Persons': [{
            'Timestamp': person.timestamp,
            'Person': self._person_to_dict(person)} for person in persons]}
        self._stub_bifurcator(
            'get_person_tracking', expected_params, response, error_code=error_code)

    def stub_get_celebrity_recognition(
            self, job_id, job_status, celebrities, error_code=None):
        """Stub get_celebrity_recognition: respond with timestamped `celebrities`."""
        expected_params = {'JobId': job_id}
        response = {'JobStatus': job_status, 'Celebrities': [{
            'Timestamp': celebrity.timestamp,
            'Celebrity': self._celebrity_to_dict(celebrity)} for celebrity in celebrities]}
        self._stub_bifurcator(
            'get_celebrity_recognition', expected_params, response, error_code=error_code)

    def stub_get_content_moderation(
            self, job_id, job_status, labels, error_code=None):
        """Stub get_content_moderation: respond with timestamped `labels`."""
        expected_params = {'JobId': job_id}
        response = {'JobStatus': job_status, 'ModerationLabels': [{
            'Timestamp': label.timestamp,
            'ModerationLabel': self._moderation_label_to_dict(label)}
            for label in labels]}
        self._stub_bifurcator(
            'get_content_moderation', expected_params, response, error_code=error_code)
| {
"content_hash": "ea99c1c34bfd9e114ae8d7619e6f918e",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 91,
"avg_line_length": 43.30313588850174,
"alnum_prop": 0.5963147730930157,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "c95080e35b254f2d8ad3dc62884625f1f3c2ef7e",
"size": "12536",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/test_tools/rekognition_stubber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
import sys
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.analysis import analysis
from androguard.core import androconf
import hashlib
TEST = "examples/android/TestsAndroguard/bin/TestsAndroguard.apk"
# Turn on androguard's debug logging so analysis progress is visible.
androconf.set_debug()
# Parse the APK, load its classes.dex, and run the analysis pass over it.
application = apk.APK(TEST)
dalvik = dvm.DalvikVMFormat(application.get_dex())
dalvik_analysis = analysis.VMAnalysis(dalvik)
# Print the tag set of every method that received at least one tag.
for method in dalvik_analysis.get_methods():
    method.create_tags()
    method_tags = method.get_tags()
    if not method_tags.empty():
        print(method_tags)
| {
"content_hash": "c9dfa596792d28b031482f36bf452b38",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.726775956284153,
"repo_name": "subho007/androguard",
"id": "cb6d70097227ab0b221b0248f25563489902ebaa",
"size": "572",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/tags.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "384130"
},
{
"name": "C++",
"bytes": "57006"
},
{
"name": "Makefile",
"bytes": "6008"
},
{
"name": "Python",
"bytes": "27560597"
}
],
"symlink_target": ""
} |
import types
import inspect
import traceback
class _MyAttributeError(Exception):
    """Replacement for AttributeError raised inside wrapped accessors.

    Using a distinct type prevents Python's attribute machinery from
    falling back to ``__getattr__`` when a getter itself fails.
    """
    pass
def convert_attribute_error(f):
    """Decorate *f* so any AttributeError it raises becomes _MyAttributeError.

    Otherwise an AttributeError escaping a property getter would trigger
    ``__getattr__`` on the wrapper and silently produce a wrong result
    (see the metaclass comment in ModelWrapper below).  Python 2 syntax.
    """
    def f_(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except AttributeError, e:  # Python 2 'except E, name' form
            # Log the original traceback before converting the exception.
            print "~" * 78
            traceback.print_exc()
            print "~" * 78
            raise _MyAttributeError(e)
    return f_
class _FGet(object):
    """Callable stand-in for a property's fget, used by ModelWrapper's
    metaclass: runs the original getter with AttributeError conversion,
    then wraps the result so it comes back as a wrapper object.
    """
    def __init__(self, attr):
        # attr: the original `property` object whose fget is delegated to.
        self.attr = attr
    def __call__(self, wrapper):
        # Call the error-converting getter on the wrapper instance, then
        # wrap whatever model object(s) it returned.
        return wraps(convert_attribute_error(self.attr.fget)(wrapper))
def wraps(obj):
    """Recursively wrap *obj* in its registered wrapper class, if any.

    Lists/tuples are rebuilt with each element wrapped.  An object is
    wrapped only when its class carries '_sa_class_manager' (presumably
    SQLAlchemy's instrumentation marker -- verify) and a
    '<ClassName>Wrapper' entry exists in _wrappers; otherwise the object
    is returned unchanged.
    """
    if isinstance(obj, types.ListType) or isinstance(obj, types.TupleType):
        # Python 2: types.ListType/TupleType are aliases of list/tuple.
        return obj.__class__(wraps(obj_) for obj_ in obj)
    if hasattr(obj.__class__, '_sa_class_manager'):
        try:
            return _wrappers[obj.__class__.__name__ + "Wrapper"](obj)
        except KeyError:
            # No wrapper registered for this model class.
            return obj
    return obj
def unwraps(obj):
    """Inverse of wraps(): return the underlying model object(s).

    Lists/tuples are rebuilt with each element unwrapped; a ModelWrapper
    yields its wrapped ``.obj``; anything else passes through untouched.
    """
    if isinstance(obj, types.ListType) or isinstance(obj, types.TupleType):
        return obj.__class__(unwraps(obj_) for obj_ in obj)
    if isinstance(obj, ModelWrapper):
        return obj.obj
    return obj
_wrappers = {}  # wrapper-class registry (name -> class), filled by ModelWrapper's metaclass
class ModelWrapper(object):
    """Base class for model wrappers (Python 2, old-style __metaclass__).

    Defining a subclass registers it in _wrappers and rewrites its
    properties/methods so that returned model objects come back wrapped
    and AttributeErrors inside accessors surface as _MyAttributeError.
    """
    class __metaclass__(type):
        def __init__(cls, name, bases, nmspc):
            type.__init__(cls, name, bases, nmspc)
            # register wrappers
            _wrappers[cls.__name__] = cls
            # decorate wrapper's method:
            #
            # * convert result object(s) to wrapper(s)
            # * convert attribute error, otherwise the underlying object
            # will be searched, and finally make bizarre result
            for name, attr in cls.__dict__.items():
                if isinstance(attr, property) and name not in {'obj'}:
                    setattr(cls, name, property(fget=_FGet(attr),
                                                fset=attr.fset,
                                                fdel=attr.fdel))
                # NOTE(review): in Python 2, cls.__dict__ holds plain
                # functions (not methods), so inspect.ismethod() is False
                # and this branch likely never runs; also `attr not in
                # {...strings}` compares a function to strings -- probably
                # meant `name not in {...}`.  Additionally, `old` is
                # rebound each iteration, so the lambdas would share its
                # last value (late binding).  Verify before relying on it.
                elif inspect.ismethod(attr) and attr not in {'__getattr__',
                                                             '__setattr__',
                                                             '__unicode__'}:
                    old = convert_attribute_error(getattr(cls, name))
                    setattr(cls, name, lambda wrapper, *args,
                            **kwargs: wraps(old(wrapper, *args, **kwargs)))
    def __init__(self, obj):
        # Wrapped model instance; name-mangled to _ModelWrapper__obj.
        self.__obj = obj
    @property
    def obj(self):
        # Read-only access to the underlying model object.
        return self.__obj
    def __getattr__(self, name):
        # Fallback lookup: fetch from the wrapped object and wrap the
        # result (element-wise for lists/tuples).
        attr = getattr(self.__obj, name)
        if isinstance(attr, types.ListType) or isinstance(attr,
                                                          types.TupleType):
            return type(attr)(wraps(i) for i in attr)
        return wraps(attr)
    def __setattr__(self, key, value):
        # TODO when only key is defined in wrapped object
        # Every write except the private __obj slot is forwarded to the
        # wrapped object.
        if key != '_ModelWrapper__obj':
            self.__obj.__setattr__(key, value)
        else:
            self.__dict__[key] = value
    def __unicode__(self):
        return unicode(self.__obj)
    def __dir__(self):
        # Expose only the wrapped object's instance attributes.
        return self.__obj.__dict__.keys()
| {
"content_hash": "8c7afffe47ad253ec87bed8dc6400a22",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 30.941747572815533,
"alnum_prop": 0.5080012550988391,
"repo_name": "PuZheng/lejian-backend",
"id": "697b9563d4a69bd5cfd89778448fec2832ee2361",
"size": "3211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lejian/apis/model_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "40688"
},
{
"name": "CSS",
"bytes": "35624"
},
{
"name": "HTML",
"bytes": "16595"
},
{
"name": "JavaScript",
"bytes": "127414"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "109344"
},
{
"name": "Shell",
"bytes": "1145"
},
{
"name": "Smarty",
"bytes": "53"
}
],
"symlink_target": ""
} |
from PlaceHoldMachine import PlaceHoldMachine

# Create the converter and raise the log level to 2 so errors are reported.
machine = PlaceHoldMachine()
machine.log_level = 2

# Every image found (recursively) under these directories will be
# replaced with a placeholder.
target_dirs = [
    'C:/MyTemplate/images/demo',
    'C:/MyTemplate/images/ads',
    'C:/MyTemplate/images/team'
]

machine.walk_recursive(target_dirs)  # collect all images in the directories
machine.start()  # run the conversion
"content_hash": "270492da0269827e13fcc9215d9ff408",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 75,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.7666666666666667,
"repo_name": "Priler/PlaceHoldMachine",
"id": "e0e9ccfc441651f30464e22d3a8dce4e9153e76e",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9110"
}
],
"symlink_target": ""
} |
"""
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no specialized interface for complex Hermitian matrices.
# To find eigenvalues of a complex Hermitian matrix you
# may use eigsh(), but eigsh() will simply call eigs()
# and return the real part of the eigenvalues thus obtained.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']
from . import _arpack
arpack_int = _arpack.timing.nbx.dtype
import numpy as np
import warnings
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import eye, issparse, isspmatrix, isspmatrix_csr
from scipy.linalg import eig, eigh, lu_factor, lu_solve
from scipy.sparse._sputils import isdense, is_pydata_spmatrix
from scipy.sparse.linalg import gmres, splu
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import ReentrancyLock
# NumPy single-letter dtype code -> ARPACK/LAPACK routine prefix:
# 'f' -> s(ingle), 'd' -> d(ouble), 'F' -> c(omplex single), 'D' -> z(complex double).
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
# Significant decimal digits carried by each dtype (single ~5, double ~12).
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error"
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"
}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD (symmetric solvers):
# largest/smallest magnitude, largest/smallest algebraic, both ends.
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD (nonsymmetric solvers):
# largest/smallest magnitude, real part, or imaginary part.
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """
    ARPACK error

    Raised when an ARPACK routine reports a nonzero ``info`` code; the
    code is translated into a human-readable message via ``infodict``.
    """
    def __init__(self, info, infodict=_NAUPD_ERRORS):
        message = infodict.get(info, "Unknown error")
        super().__init__("ARPACK error %d: %s" % (info, message))
class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """
    def __init__(self, msg, eigenvalues, eigenvectors):
        # Wrap the message under the generic error code -1.
        super().__init__(-1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
def choose_ncv(k):
    """
    Choose number of lanczos vectors based on target number
    of singular/eigen values and vectors to compute, k.
    """
    ncv = 2 * k + 1
    if ncv < 20:
        ncv = 20
    return ncv
class _ArpackParams:
    """Shared state for an ARPACK reverse-communication iteration.

    Validates the problem size/type parameters, allocates the residual
    and Ritz-vector storage, and initializes the IPARAM control array.
    """
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)
        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
        if v0 is not None:
            # ARPACK overwrites its initial resid, make a copy
            self.resid = np.array(v0, copy=True)
            info = 1  # tells ARPACK a user-supplied starting vector is present
        else:
            # ARPACK will use a random initial vector.
            self.resid = np.zeros(n, tp)
            info = 0
        # sigma == None means no shift is in effect.
        self.sigma = 0 if sigma is None else sigma
        if ncv is None:
            ncv = choose_ncv(k)
        ncv = min(ncv, n)
        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, arpack_int)
        # set solver mode and parameters
        self.mode = mode
        self.iparam[0] = 1        # ISHIFT=1: use exact shifts
        self.iparam[2] = maxiter  # maximum number of Arnoldi update iterations
        self.iparam[3] = 1        # NB block size (ARPACK supports only 1)
        self.iparam[6] = mode
        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info
        self.converged = False
        self.ido = 0  # reverse-communication flag; 0 = first call
    def _raise_no_convergence(self):
        """Raise ArpackNoConvergence carrying whatever partial results exist."""
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            # Extraction itself failed: report it and return empty results.
            msg = "%s [%s]" % (msg, err)
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
    """Driver state for the real-symmetric ARPACK solvers (*saupd/*seupd)."""
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        """Validate the mode-specific operator arguments and set up the
        ARPACK work arrays and routine handles."""
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #       A*x = lambda*x :
        #        A - symmetric
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #       A*x = lambda*AG*x
        #        A  - symmetric positive semi-definite
        #        AG - symmetric indefinite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #       A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")
            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")
            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")
            if M_matvec is None:
                # M is the identity: standard problem in shift-invert form.
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")
            self.OPa = Minv_matvec
            self.A_matvec = matvec
            if M_matvec is None:
                # Cayley transform with M = identity.
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)
        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than ndim(A), k=%d" % k)
        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)
        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)
        # 's' or 'd' prefix selects the single/double precision routines.
        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            raise ValueError("Input matrix is not real-valued.")
        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]
        self.ipntr = np.zeros(11, arpack_int)
    def iterate(self):
        """Perform one reverse-communication step of *saupd and service
        the operator request encoded in ``self.ido``."""
        self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)
        # ipntr holds 1-based Fortran offsets into workd for x and y.
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                # Cayley mode: B*x was already computed and stored by ARPACK.
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # ido == 99: the iteration is finished (converged or errored).
            self.converged = True
            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)
    def extract(self, return_eigenvectors):
        """Call *seupd to extract eigenvalues (and optionally Ritz vectors)
        once the iteration has converged."""
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        # iparam[4] is the number of Ritz values that actually converged.
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]
        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than ndim(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.workd = _aligned_zeros(3 * n, self.tp)
self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, arpack_int)
if self.tp in 'FD':
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
    """Run the ARPACK post-processing routine (``*neupd``) and return
    the converged eigenvalues (and optionally eigenvectors).

    For the real solvers (s, d) complex conjugate eigenpairs arrive
    split across real arrays and are reassembled here; the complex
    solvers (c, z) return directly usable complex arrays.
    """
    k, n = self.k, self.n

    ierr = 0
    howmny = 'A'  # return all eigenvectors
    sselect = np.zeros(self.ncv, 'int')  # unused
    sigmar = np.real(self.sigma)
    sigmai = np.imag(self.sigma)
    workev = np.zeros(3 * self.ncv, self.tp)

    if self.tp in 'fd':
        # Real solver: one extra slot (k+1) because a complex conjugate
        # pair straddling the cutoff can push one extra value back.
        dr = np.zeros(k + 1, self.tp)
        di = np.zeros(k + 1, self.tp)
        zr = np.zeros((n, k + 1), self.tp)
        dr, di, zr, ierr = \
            self._arpack_extract(return_eigenvectors,
                                 howmny, sselect, sigmar, sigmai, workev,
                                 self.bmat, self.which, k, self.tol, self.resid,
                                 self.v, self.iparam, self.ipntr,
                                 self.workd, self.workl, self.info)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        nreturned = self.iparam[4]  # number of good eigenvalues returned

        # Build complex eigenvalues from real and imaginary parts
        d = dr + 1.0j * di

        # Arrange the eigenvectors: complex eigenvectors are stored as
        # real,imaginary in consecutive columns
        z = zr.astype(self.tp.upper())

        # The ARPACK nonsymmetric real and double interface (s,d)naupd
        # return eigenvalues and eigenvectors in real (float,double)
        # arrays.

        # Efficiency: this should check that return_eigenvectors == True
        # before going through this construction.
        if sigmai == 0:
            i = 0
            while i <= k:
                # check if complex
                if abs(d[i].imag) != 0:
                    # this is a complex conjugate pair with eigenvalues
                    # in consecutive columns
                    if i < k:
                        z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                        z[:, i + 1] = z[:, i].conjugate()
                        # skip the conjugate partner we just filled in
                        i += 1
                    else:
                        # last eigenvalue is complex: the imaginary part of
                        # the eigenvector has not been returned
                        # this can only happen if nreturned > k, so we'll
                        # throw out this case.
                        nreturned -= 1
                i += 1
        else:
            # real matrix, mode 3 or 4, imag(sigma) is nonzero:
            # see remark 3 in <s,d>neupd.f
            # Build complex eigenvalues from real and imaginary parts
            i = 0
            while i <= k:
                if abs(d[i].imag) == 0:
                    # real Ritz value: recover the eigenvalue of A via
                    # the Rayleigh quotient x^T A x (x is normalized).
                    d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                else:
                    if i < k:
                        z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                        z[:, i + 1] = z[:, i].conjugate()
                        # Rayleigh quotient of the complex eigenvector,
                        # expanded into its four real dot products.
                        d[i] = ((np.dot(zr[:, i],
                                        self.matvec(zr[:, i]))
                                 + np.dot(zr[:, i + 1],
                                          self.matvec(zr[:, i + 1])))
                                + 1j * (np.dot(zr[:, i],
                                               self.matvec(zr[:, i + 1]))
                                        - np.dot(zr[:, i + 1],
                                                 self.matvec(zr[:, i]))))
                        d[i + 1] = d[i].conj()
                        i += 1
                    else:
                        # last eigenvalue is complex: the imaginary part of
                        # the eigenvector has not been returned
                        # this can only happen if nreturned > k, so we'll
                        # throw out this case.
                        nreturned -= 1
                i += 1

        # Now we have k+1 possible eigenvalues and eigenvectors
        # Return the ones specified by the keyword "which"
        if nreturned <= k:
            # we got less or equal as many eigenvalues we wanted
            d = d[:nreturned]
            z = z[:, :nreturned]
        else:
            # we got one extra eigenvalue (likely a cc pair, but which?)
            # Sort by the *shifted* spectrum when in shift-invert modes,
            # since that is what 'which' refers to there.
            if self.mode in (1, 2):
                rd = d
            elif self.mode in (3, 4):
                rd = 1 / (d - self.sigma)

            if self.which in ['LR', 'SR']:
                ind = np.argsort(rd.real)
            elif self.which in ['LI', 'SI']:
                # for LI,SI ARPACK returns largest,smallest
                # abs(imaginary) (complex pairs come together)
                ind = np.argsort(abs(rd.imag))
            else:
                ind = np.argsort(abs(rd))

            if self.which in ['LR', 'LM', 'LI']:
                ind = ind[-k:][::-1]
            elif self.which in ['SR', 'SM', 'SI']:
                ind = ind[:k]

            d = d[ind]
            z = z[:, ind]
    else:
        # complex is so much simpler...
        d, z, ierr =\
            self._arpack_extract(return_eigenvectors,
                                 howmny, sselect, self.sigma, workev,
                                 self.bmat, self.which, k, self.tol, self.resid,
                                 self.v, self.iparam, self.ipntr,
                                 self.workd, self.workl, self.rwork, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        # iparam[4] is the number of converged eigenvalues.
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]

    if return_eigenvectors:
        return d, z
    else:
        return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """

    def __init__(self, M):
        self.shape = M.shape
        self.dtype = M.dtype
        self.isreal = not np.issubdtype(M.dtype, np.complexfloating)
        # Factor once; every _matvec call reuses this decomposition.
        self.M_lu = splu(M)

    def _matvec(self, x):
        # careful here: splu.solve of a real factorization throws away
        # the imaginary part of x, so a complex right-hand side must be
        # solved component-wise against a real M.
        x = np.asarray(x)
        x_is_complex = np.issubdtype(x.dtype, np.complexfloating)
        if not (self.isreal and x_is_complex):
            return self.M_lu.solve(x.astype(self.dtype))
        re = self.M_lu.solve(np.real(x).astype(self.dtype))
        im = self.M_lu.solve(np.imag(x).astype(self.dtype))
        return re + 1j * im
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M
    """

    def __init__(self, M):
        self.shape = M.shape
        self.dtype = M.dtype
        # Factor once up front; solves against the same M are then cheap.
        self.M_lu = lu_factor(M)

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)
def gmres_loose(A, b, tol):
    """
    gmres with looser termination condition.

    Never asks gmres for better than roughly 1000*sqrt(n)*eps relative
    accuracy, which is unattainable in floating point anyway.
    """
    b = np.asarray(b)
    floor_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
    effective_tol = tol if tol > floor_tol else floor_tol
    return gmres(A, b, tol=effective_tol, atol=0)
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """

    def __init__(self, M, ifunc=gmres_loose, tol=0):
        self.M = M
        self.shape = M.shape
        # Infer the dtype by probing with a zero vector when the
        # operator does not expose one.
        if hasattr(M, 'dtype'):
            self.dtype = M.dtype
        else:
            probe = np.zeros(M.shape[1])
            self.dtype = (M * probe).dtype

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        b, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma
        self.shape = A.shape

        # Build the shifted operator x -> (A - sigma*M) @ x, treating a
        # missing M as the identity matrix.
        if M is None:
            def shifted(x):
                return A.matvec(x) - sigma * x
        else:
            def shifted(x):
                return A.matvec(x) - sigma * M.matvec(x)

        # Probe with a zero vector to discover the result dtype.
        probe = np.zeros(A.shape[1])
        self.OP = LinearOperator(self.A.shape, shifted,
                                 dtype=shifted(probe).dtype)

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function. We should match this
            tol = 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b

    @property
    def dtype(self):
        # Delegate to the shifted operator's (possibly promoted) dtype.
        return self.OP.dtype
def _fast_spmatrix_to_csc(A, hermitian=False):
"""Convert sparse matrix to CSC (by transposing, if possible)"""
if (isspmatrix_csr(A) and hermitian
and not np.issubdtype(A.dtype, np.complexfloating)):
return A.T
elif is_pydata_spmatrix(A):
# No need to convert
return A
else:
return A.tocsc()
def get_inv_matvec(M, hermitian=False, tol=0):
    """Return a callable computing ``M^-1 @ x``.

    Dense matrices get a dense LU solve, sparse matrices a sparse LU
    solve, and anything else falls back to an iterative solver.
    """
    if isdense(M):
        return LuInv(M).matvec
    if isspmatrix(M) or is_pydata_spmatrix(M):
        csc = _fast_spmatrix_to_csc(M, hermitian=hermitian)
        return SpLuInv(csc).matvec
    # General linear operator: no factorization available.
    return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    """Return a callable computing ``[A - sigma*M]^-1 @ x``.

    Chooses a dense LU, sparse LU, or iterative solver depending on the
    types of A and M.  With ``sigma == 0`` this reduces to ``A^-1 @ x``;
    ``M=None`` is treated as the identity matrix.

    The ``hermitian`` flag enables a cheap CSR->CSC conversion of the
    shifted matrix; ``tol`` is forwarded to the iterative fallback.
    """
    if sigma == 0:
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        # M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                # Real A with a complex shift: promote to complex first.
                A = A + 0j
            # Subtract sigma from the diagonal in place.
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif isspmatrix(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             M, sigma, tol=tol).matvec
    else:
        # BUG FIX: this clause previously tested is_pydata_spmatrix(A)
        # instead of is_pydata_spmatrix(M), so a pydata-sparse M could
        # be misclassified as an abstract operator (or vice versa) and
        # sent down the wrong solver path.
        def _is_explicit(X):
            # True when X is a concrete matrix we can factorize directly.
            return isdense(X) or isspmatrix(X) or is_pydata_spmatrix(X)

        if not _is_explicit(A) or not _is_explicit(M):
            # At least one operand is only available as an abstract
            # linear operator: fall back to an iterative solver.
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            # Mixing dense with anything explicit: form the shifted
            # matrix densely and LU-factor it.
            return LuInv(A - sigma * M).matvec
        else:
            # Both sparse: factor the shifted sparse matrix.
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec
# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
# lock and a re-entering check.
# Message fix: the public function is spelled "eigsh", not "eighs".
_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eigsh not allowed: "
                              "ARPACK is not re-entrant")
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
         Minv=None, OPinv=None, OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        An array, sparse matrix, or LinearOperator representing
        the operation ``A @ x``, where A is a real or complex square matrix.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N-1. It is not possible to compute all
        eigenvectors of a matrix.
    M : ndarray, sparse matrix or LinearOperator, optional
        An array, sparse matrix, or LinearOperator representing
        the operation M@x for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If `sigma` is None, M is positive definite

            If sigma is specified, M is positive semi-definite

        If sigma is None, eigs requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real or complex, optional
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues ``w'[i]`` where:

            If A is real and OPpart == 'r' (default),
              ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.

            If A is real and OPpart == 'i',
              ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.

            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.

    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : largest magnitude

            'SM' : smallest magnitude

            'LR' : largest real part

            'SR' : smallest real part

            'LI' : largest imaginary part

            'SI' : smallest imaginary part

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above). ARPACK is generally better
        at finding large values than small values. If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed
        Default: ``n*10``
    tol : float, optional
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : bool, optional
        Return eigenvectors (True) in addition to eigenvalues
    Minv : ndarray, sparse matrix or LinearOperator, optional
        See notes in M, above.
    OPinv : ndarray, sparse matrix or LinearOperator, optional
        See notes in sigma, above.
    OPpart : {'r' or 'i'}, optional
        See notes in sigma, above

    Returns
    -------
    w : ndarray
        Array of k eigenvalues.
    v : ndarray
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)
    """
    # --- argument validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k=%d must be greater than 0." % k)

    if k >= n - 1:
        # ARPACK cannot compute a nearly-full spectrum; fall back to the
        # dense solver when the inputs permit it.
        warnings.warn("k >= N - 1 for N * N square matrix. "
                      "Attempting to use scipy.linalg.eig instead.",
                      RuntimeWarning)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "A with k >= N - 1.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "M with k >= N - 1.")

        return eig(A, b=M, right=return_eigenvectors)

    if sigma is None:
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        # Select the ARPACK mode: 3 = real/complex shift-invert,
        # 4 = imaginary-part shift-invert (real A only).
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           hermitian=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant; serialize all eigs/eigsh calls.
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

        return params.extract(return_eigenvectors)
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
          ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
          Minv=None, OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex Hermitian matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i].

    Note that there is no specialized routine for the case when A is a complex
    Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
    real parts of the eigenvalues thus obtained.

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        A square operator representing the operation ``A @ x``, where ``A`` is
        real symmetric or complex Hermitian. For buckling mode (see below)
        ``A`` must additionally be positive-definite.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array representing the `k` eigenvectors. The column ``v[:, i]`` is
        the eigenvector corresponding to the eigenvalue ``w[i]``.

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation ``M @ x`` for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If sigma is None, M is symmetric positive definite.

            If sigma is specified, M is symmetric positive semi-definite.

            In buckling mode, M is symmetric indefinite.

        If sigma is None, eigsh requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues ``w'[i]`` where:

            if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.

            if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.

            if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.

        (see further discussion in 'mode' below)
    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated ncv must be greater than k and
        smaller than n; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex Hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : Largest (in magnitude) eigenvalues.

            'SM' : Smallest (in magnitude) eigenvalues.

            'LA' : Largest (algebraic) eigenvalues.

            'SA' : Smallest (algebraic) eigenvalues.

            'BE' : Half (k/2) from each end of the spectrum.

        When k is odd, return one more (k/2+1) from the high end.
        When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
        (see discussion in 'sigma', above). ARPACK is generally better
        at finding large values than small values. If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed.
        Default: ``n*10``
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : bool
        Return eigenvectors (True) in addition to eigenvalues.
        This value determines the order in which eigenvalues are sorted.
        The sort order is also dependent on the `which` variable.

            For which = 'LM' or 'SA':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                absolute value.

            For which = 'BE' or 'LA':
                eigenvalues are always sorted by algebraic value.

            For which = 'SM':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                decreasing absolute value.

    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode. This argument applies
        only for real-valued A and sigma != None. For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP @ x'[i] = w'[i] * B @ x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A @ x[i] = w[i] * M @ x[i]``.
        The modes are as follows:

            'normal' :
                OP = [A - sigma * M]^-1 @ M,
                B = M,
                w'[i] = 1 / (w[i] - sigma)

            'buckling' :
                OP = [A - sigma * M]^-1 @ A,
                B = A,
                w'[i] = w[i] / (w[i] - sigma)

            'cayley' :
                OP = [A - sigma * M]^-1 @ [A + sigma * M],
                B = M,
                w'[i] = (w[i] + sigma) / (w[i] - sigma)

        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion).

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    >>> from scipy.sparse.linalg import eigsh
    >>> identity = np.eye(13)
    >>> eigenvalues, eigenvectors = eigsh(identity, k=6)
    >>> eigenvalues
    array([1., 1., 1., 1., 1., 1.])
    >>> eigenvectors.shape
    (13, 6)
    """
    # complex Hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            # A Hermitian spectrum is real, so drop the (numerically
            # zero) imaginary parts of the eigenvalues.
            return ret[0].real, ret[1]
        else:
            return ret.real

    # --- argument validation (real symmetric path) ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k must be greater than 0.")

    if k >= n:
        # ARPACK cannot compute the full spectrum; fall back to the
        # dense solver when the inputs permit it.
        warnings.warn("k >= N for N * N square matrix. "
                      "Attempting to use scipy.linalg.eigh instead.",
                      RuntimeWarning)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
                            "k >= N. Use scipy.linalg.eigh(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "A with k >= N.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "M with k >= N.")

        return eigh(A, b=M, eigvals_only=not return_eigenvectors)

    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        # NOTE: `mode` is rebound below from its string value to the
        # corresponding integer ARPACK mode number.
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    # ARPACK is not re-entrant; serialize all eigs/eigsh calls.
    with _ARPACK_LOCK:
        while not params.converged:
            params.iterate()

        return params.extract(return_eigenvectors)
| {
"content_hash": "5da9f789201672969fea8651e3aead51",
"timestamp": "",
"source": "github",
"line_count": 1691,
"max_line_length": 89,
"avg_line_length": 39.7806031933767,
"alnum_prop": 0.5359080705822891,
"repo_name": "grlee77/scipy",
"id": "80d7465500443c2f800738738526950ba4f327a8",
"size": "67269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/sparse/linalg/eigen/arpack/arpack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4818733"
},
{
"name": "C++",
"bytes": "3180413"
},
{
"name": "CMake",
"bytes": "29273"
},
{
"name": "Cython",
"bytes": "1033901"
},
{
"name": "Dockerfile",
"bytes": "9777"
},
{
"name": "Fortran",
"bytes": "5298461"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "14215612"
},
{
"name": "Shell",
"bytes": "3533"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for the numconv project
# (standard sphinx-quickstart layout; commented-out entries show defaults).

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
              'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram',
              'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'numconv'
copyright = u'2008-2010, Gustavo Picon'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'numconvdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'numconv.tex', u'numconv Documentation',
     u'Gustavo Picon', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "0530a8a75504a3b18236206b48f76252",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 32.94535519125683,
"alnum_prop": 0.7072482998838945,
"repo_name": "tabo/numconv",
"id": "1f1abfb15e261d9ccecb4bdeb163351ee7f02939",
"size": "6447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16270"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
} |
class FileManager:
    """Thin lookup wrapper over a ``files`` mapping of filename -> contents.

    NOTE(review): ``self.files`` is assumed to be populated elsewhere
    (no ``__init__`` is defined here) -- confirm against the caller.
    """
    def HasFile(self, filename):
        """Return True when *filename* is present in the managed mapping."""
        present = filename in self.files
        return present
    def GetFile(self, filename):
        """Return the stored contents for *filename* (KeyError if absent)."""
        contents = self.files[filename]
        return contents
def main():
    """Smoke-test the generated calendar page via FileManager + lxml.

    Bug fix: ``Element.xpath()`` always returns a *list* of matches, so the
    original ``dom.xpath(...).text`` / ``dom.xpath(...)['href']`` raised
    AttributeError/TypeError; unpack element [0] and read attributes with
    ``.get()``.
    """
    ...
    page = opts.prefix + 'calendar/events/2012-01-02-foo.html'
    assert fm.HasFile(page)
    text = fm.GetFile(page)
    dom = etree.parse(StringIO(text))
    assert dom.xpath('//title')[0].text == 'Foo - Stanford Humanities Center'
    topnext = dom.xpath('//div[@id = "topnext"]')
    assert topnext
    # NOTE(review): the href is read off the div itself -- confirm it is not
    # meant to come from an <a> nested inside the div.
    assert topnext[0].get('href') == 'calendar/events/2012-02.html'
    assert topnext[0].text == "Next"
| {
"content_hash": "ce4389512db88be7957f9dff60ce88c7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.6623586429725363,
"repo_name": "starpow971/Stanford-Humanities-Center-Updater",
"id": "6dccb939a2db28e6234aed603ab9715c1a6b69bd",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "80689"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
import fastq_index
import sys
from fastq_lookup import DictionaryLookup, BinaryLookup
# Python 2 smoke test: the dictionary-backed and binary-search lookups must
# return identical entries for every record id in the test FASTQ file.
lookup1 = DictionaryLookup("test/test_index", "test/PE-173.40k.fastq")
lookup2 = BinaryLookup("test/test_index", "test/PE-173.40k.fastq")
to_lookup = []
count = 0
# FASTQ records are 4 lines long; line 0 of each record is the "@id" header.
for line in open("test/PE-173.40k.fastq", 'r'):
    if count % 4 ==0:
        to_lookup.append(line.strip())
    count += 1
for id in to_lookup:
    res1 = lookup1.find_entry(id)
    res2 = lookup2.find_entry(id)
    # the two implementations must agree with each other ...
    if res1 != res2:
        print >> sys.stderr, "TEST FAILED", id
        sys.exit(-1)
    # ... and each result's first field must echo the id that was looked up
    if res1[0] != id:
        print >> sys.stderr, "TEST FAILED", id
        sys.exit(-1)
    if res2[0] != id:
        print >> sys.stderr, "TEST FAILED", id
        sys.exit(-1)
print "TESTS PASSED"
| {
"content_hash": "c487aba879bc0671f2244c1f7e0b8fd9",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 26.103448275862068,
"alnum_prop": 0.6129458388375165,
"repo_name": "hillst/FastqIndex",
"id": "cfa3a598e6ec0642bb1e554a8c38fc5a0db24b5f",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "153"
},
{
"name": "Python",
"bytes": "8158"
},
{
"name": "Shell",
"bytes": "163"
}
],
"symlink_target": ""
} |
'''sample usage of exchrate package'''
import exchrate # import package
from exchrate import exrateparse # import parse module
from exchrate.exrateparse import ExchangeRateParse # import class directly
from pprint import pprint
# set parameters
# NOTE: ExchangeRateParse expects positional order (source, dates, base, local)
exratesrc = 'NBU-json' # exchange rate source
exratedate = ('2016-12-01', '2016-12-02') # exchange rate dates
localcur = 'UAH' # local currency code 1 base = x local
basecur = 'USD' # base currency code
params = (exratesrc, exratedate, basecur, localcur)
def get_exch_rate_example():
    """Demonstrate the three equivalent import forms of ExchangeRateParse.

    Builds one parser per import form, fetches the rates with each and
    pretty-prints the three results.
    """
    # initialize objects from different import forms
    e1 = exchrate.ExchangeRateParse(*params)
    e2 = exrateparse.ExchangeRateParse(*params)
    e3 = ExchangeRateParse(*params)
    # assign result of method
    r1 = e1.get_exch_rate()
    # Fix: the return value was previously bound to an unused variable `t`;
    # call for its side effect only, then read the cached result back.
    # NOTE(review): this reaches into the private attribute _last_result.
    e2.get_exch_rate()
    r2 = e2._last_result
    r3 = e3.get_exch_rate()
    # print results
    print('')
    print('------------ START - get_exch_rate_example ------------')
    print('Import package')
    pprint(r1)
    print('____')
    print('Import parsing module')
    pprint(r2)
    print('____')
    print('Import class directly')
    pprint(r3)
    print('____')
    print('------------ END - get_exch_rate_example ------------')
def use_exch_rate_calc():
    '''example of using exchange rate result in calculation'''
    # sample data for calculation
    price_eur = 150.25 # e.g. we have price in EUR and we want to convert
    # it to PLN for displaying
    # create object
    e = ExchangeRateParse(*params)
    # change object settings
    e.set_source('ECB-Fixer') # update exchange rate source
    e.exratedate = '2016-12-01' # update exchange rate date
    e.localcur = 'PLN' # update local currency (1 EUR = x PLN)
    e.basecur = 'EUR' # update base currency (to)
    # Fix: the old one-liner did next(iter(...), None).exrate, which raised a
    # confusing AttributeError on None when no rate came back; fail with a
    # clear message instead.
    rate_entry = next(iter(e.get_exch_rate()), None)
    if rate_entry is None:
        raise ValueError('no exchange rate returned for %s on %s'
                         % (e.basecur, e.exratedate))
    price_pln = price_eur * rate_entry.exrate
    print('')
    print('------------ START - use_exch_rate_calc ------------')
    print('{0} EUR = {1:.2f} PLN (on {2})'.format(price_eur, price_pln, e.exratedate))
    print('------------ END - use_exch_rate_calc ------------')
if __name__ == '__main__':
    # run both usage examples when executed as a script
    get_exch_rate_example()
    use_exch_rate_calc()
| {
"content_hash": "94b2009e636e943fd90b87a1af8928d2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 86,
"avg_line_length": 32.40845070422535,
"alnum_prop": 0.6162538026944807,
"repo_name": "anton-shestakov/exchrate",
"id": "2bf5760a10f9c19b79a880c8f1c754227c470a0a",
"size": "2323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/simple_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21774"
}
],
"symlink_target": ""
} |
import inspect
import json
from contextlib import ExitStack
from typing import Iterable
import base58
from plenum.common.constants import REQACK, TXN_ID, DATA
from stp_core.common.log import getlogger
from plenum.common.signer_simple import SimpleSigner
from plenum.common.util import getMaxFailures, runall
from plenum.test.helper import TestNodeSet as PlenumTestNodeSet
from plenum.test.helper import waitForSufficientRepliesForRequests, \
checkLastClientReqForNode, buildCompletedTxnFromReply
from plenum.test.test_node import checkNodesAreReady, TestNodeCore
from plenum.test.test_node import checkNodesConnected
from plenum.test.testable import spyable
from plenum.test import waits as plenumWaits, waits
from sovrin_client.client.wallet.attribute import LedgerStore, Attribute
from sovrin_client.client.wallet.wallet import Wallet
from sovrin_client.test.helper import genTestClient, genTestClientProvider
from sovrin_common.constants import ATTRIB, TARGET_NYM, TXN_TYPE, GET_NYM
from sovrin_common.test.helper import TempStorage
from sovrin_node.server.node import Node
from sovrin_node.server.upgrader import Upgrader
from stp_core.loop.eventually import eventually
from stp_core.loop.looper import Looper
logger = getlogger()
class Scenario(ExitStack):
    """
    Test context
    simple container to toss in a dynamic context to streamline testing

    Owns (or borrows) a TestNodeSet and a Looper; as an ExitStack it tears
    down whatever it created via enter_context() when the scenario exits.
    """
    def __init__(self,
                 nodeCount=None,
                 nodeRegistry=None,
                 nodeSet=None,
                 looper=None,
                 tmpdir=None):
        super().__init__()
        self.actor = None # type: Organization
        # create our own node set only when none was supplied by the caller
        if nodeSet is None:
            self.nodes = self.enter_context(TestNodeSet(count=nodeCount,
                                                        nodeReg=nodeRegistry,
                                                        tmpdir=tmpdir))
        else:
            self.nodes = nodeSet
        self.nodeReg = self.nodes.nodeReg
        # likewise for the event loop wrapper
        if looper is None:
            self.looper = self.enter_context(Looper(self.nodes))
        else:
            self.looper = looper
        self.tmpdir = tmpdir
        self.ran = [] # history of what has been run
        self.userId = None
        self.userNym = None
        self.trustAnchor = None
        self.trustAnchorNym = None
        self.agent = None
        self.agentNym = None
    def run(self, *coros):
        """Run the given coroutines (or coroutine factories taking this
        scenario) on the looper and record them in self.ran.

        NOTE(review): `result` is only bound inside `if new:`; calling
        run() with no arguments raises UnboundLocalError at `return result`.
        """
        new = []
        for c in coros:
            if inspect.isfunction(c) or inspect.ismethod(c):
                new.append(c(self)) # call it with this context
            else:
                new.append(c)
        if new:
            result = self.looper.run(*new)
        self.ran.extend(coros)
        return result
    def ensureRun(self, *coros):
        """
        Ensures the coro gets run, in other words, this method optionally
        runs the coro if it has not already been run in this scenario
        :param coros:
        :return:
        """
        unrun = [c for c in coros if c not in self.ran]
        return self.run(*unrun)
    async def start(self):
        # wait for full connectivity, then for node readiness with ratcheting
        await checkNodesConnected(self.nodes)
        timeout = plenumWaits.expectedPoolStartUpTimeout(len(self.nodes))
        await eventually(checkNodesAreReady,
                         self.nodes,
                         retryWait=.25,
                         timeout=timeout,
                         ratchetSteps=10)
    async def startClient(self, org=None):
        # defaults to the scenario's current actor
        org = org if org else self.actor
        self.looper.add(org.client)
        await org.client.ensureConnectedToNodes()
    def copyOfInBox(self, org=None):
        """Snapshot of the org's client inBox (for later netting out)."""
        org = org if org else self.actor
        return org.client.inBox.copy()
    async def checkAcks(self, org=None, count=1, minusInBox=None):
        """Wait until every node has ACKed `count` times, ignoring any
        messages already present in `minusInBox`."""
        org = org if org else self.actor
        ib = self.copyOfInBox(org)
        if minusInBox:
            for x in minusInBox:
                ib.remove(x)
        timeout = plenumWaits.expectedReqAckQuorumTime()
        for node in self.nodes:
            await eventually(self.checkInboxForReAck,
                             org.client.name,
                             ib,
                             REQACK,
                             node,
                             count,
                             retryWait=.1,
                             timeout=timeout,
                             ratchetSteps=10)
    @staticmethod
    def checkInboxForReAck(clientName, clientInBox, op, fromNode,
                           expectedCount: int):
        # NOTE(review): `msg` is computed but never used below.
        msg = 'Got your request client ' + clientName
        # count inBox entries whose op matches and that came from `fromNode`
        actualCount = sum(
            1 for x in clientInBox
            if x[0]['op'] == op and x[1] == fromNode.clientstack.name)
        assert actualCount == expectedCount
    async def checkReplies(self,
                           reqs,
                           org=None,
                           retryWait=.25,
                           timeout=None,
                           ratchetSteps=10):
        """Wait for sufficient (f+1) replies for each request in `reqs`."""
        org = org if org else self.actor
        if not isinstance(reqs, Iterable):
            reqs = [reqs]
        nodeCount = sum(1 for _ in self.nodes)
        f = getMaxFailures(nodeCount)
        corogen = (eventually(waitForSufficientRepliesForRequests,
                              org.client.inBox,
                              r.reqId,
                              f,
                              retryWait=retryWait,
                              timeout=timeout,
                              ratchetSteps=ratchetSteps) for r in reqs)
        return await runall(corogen)
    async def send(self, op, org=None):
        """Submit `op` and wait until every node has seen the request."""
        org = org if org else self.actor
        req = org.client.submit(op)[0]
        timeout = plenumWaits.expectedTransactionExecutionTime(
            len(self.nodes))
        for node in self.nodes:
            await eventually(checkLastClientReqForNode,
                             node,
                             req,
                             retryWait=1,
                             timeout=timeout)
        return req
    async def sendAndCheckAcks(self, op, count: int = 1, org=None):
        baseline = self.copyOfInBox() # baseline of client inBox so we can
        # net it out
        req = await self.send(op, org)
        await self.checkAcks(count=count, minusInBox=baseline)
        return req
    def genOrg(self):
        """Create a fresh Organization backed by a new test client."""
        cli = genTestClientProvider(nodes=self.nodes,
                                    nodeReg=self.nodeReg.extractCliNodeReg(),
                                    tmpdir=self.tmpdir)
        return Organization(cli)
    def addAgent(self):
        self.agent = self.genOrg()
        return self.agent
    def addTrustAnchor(self):
        self.trustAnchor = self.genOrg()
        return self.trustAnchor
class Organization:
    """A test actor holding a client, its own wallet and per-user wallets."""
    def __init__(self, client=None):
        self.client = client
        # one wallet per organization, created eagerly
        self.wallet = Wallet(self.client)
        # per-user wallets, keyed by user id
        self.userWallets = {}  # type: Dict[str, Wallet]
    def removeUserWallet(self, userId: str):
        """Drop the wallet registered for *userId*; ValueError if unknown."""
        try:
            del self.userWallets[userId]
        except KeyError:
            raise ValueError("No wallet exists for this user id")
    def addTxnsForCompletedRequestsInWallet(self, reqs: Iterable,
                                            wallet: Wallet):
        """Record a completed txn in *wallet* for every CONFIRMED request."""
        for req in reqs:
            reply, status = self.client.getReply(req.reqId)
            if status != "CONFIRMED":
                continue
            # TODO Figure out the actual implementation of
            # `buildCompletedTxnFromReply`; this is just a stub.
            txn = buildCompletedTxnFromReply(req, reply)
            # TODO Move this logic into the wallet itself
            if txn['txnType'] == ATTRIB and txn['data'] is not None:
                firstAttr = next(iter(txn['data'].keys()))
                if firstAttr in wallet.attributeEncKeys:
                    txn['secretKey'] = wallet.attributeEncKeys.pop(firstAttr)
            wallet.addCompletedTxn(txn)
# Upgrader whose processLedger calls are instrumented by @spyable so tests
# can inspect them.
@spyable(methods=[Upgrader.processLedger])
class TestUpgrader(Upgrader):
    pass
# noinspection PyShadowingNames
# Node with its key message-handling entry points wrapped by @spyable so
# tests can assert on calls.
# NOTE(review): Node.processRequest is listed twice in the spied methods.
@spyable(
    methods=[Node.handleOneNodeMsg, Node.processRequest, Node.processOrdered,
             Node.postToClientInBox, Node.postToNodeInBox, "eatTestMsg",
             Node.decidePrimaries, Node.startViewChange, Node.discard,
             Node.reportSuspiciousNode, Node.reportSuspiciousClient,
             Node.processRequest, Node.processPropagate, Node.propagate,
             Node.forward, Node.send, Node.processInstanceChange,
             Node.checkPerformance, Node.getReplyFromLedger])
class TestNode(TempStorage, TestNodeCore, Node):
    def __init__(self, *args, **kwargs):
        # both base initializers receive the same arguments deliberately
        Node.__init__(self, *args, **kwargs)
        TestNodeCore.__init__(self, *args, **kwargs)
        # when True, the node wipes its data directory on shutdown
        self.cleanupOnStopping = True
    def getUpgrader(self):
        # substitute the spyable upgrader defined above
        return TestUpgrader(self.id, self.name, self.dataLocation, self.config,
                            self.configLedger)
    def getDomainReqHandler(self):
        return Node.getDomainReqHandler(self)
    def onStopping(self, *args, **kwargs):
        # self.graphStore.store.close()
        super().onStopping(*args, **kwargs)
        if self.cleanupOnStopping:
            self.cleanupDataLocation()
class TestNodeSet(PlenumTestNodeSet):
    # Thin wrapper that plugs the sovrin TestNode (above) in as the default
    # node class of the plenum test node set.
    def __init__(self,
                 names: Iterable[str] = None,
                 count: int = None,
                 nodeReg=None,
                 tmpdir=None,
                 keyshare=True,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 testNodeClass=TestNode):
        super().__init__(names, count, nodeReg, tmpdir, keyshare,
                         primaryDecider=primaryDecider,
                         pluginPaths=pluginPaths,
                         testNodeClass=testNodeClass)
def checkSubmitted(looper, client, optype, txnsBefore):
    """Wait until the client reports more txns of *optype* than *txnsBefore*,
    then return the ids of the newly appeared txns."""
    after = []

    def txnCountAdvanced():
        nonlocal after
        after = client.getTxnsByType(optype)
        logger.debug("old and new txns {} {}".format(txnsBefore, after))
        assert len(after) > len(txnsBefore)

    looper.run(eventually(txnCountAdvanced, retryWait=1,
                          timeout=plenumWaits.expectedReqAckQuorumTime()))
    idsBefore = [txn[TXN_ID] for txn in txnsBefore]
    idsAfter = [txn[TXN_ID] for txn in after]
    logger.debug("old and new txnids {} {}".format(idsBefore, idsAfter))
    return list(set(idsAfter) - set(idsBefore))
def submitAndCheck(looper, client, wallet, op, identifier=None):
    """Sign *op*, submit it through the wallet/client pair and return the
    ids of the txns that appeared as a result.

    TODO: This assumes every transaction will have an edge in graph, why?
    Fix this
    """
    opType = op[TXN_TYPE]
    before = client.getTxnsByType(opType)
    signed = wallet.signOp(op, identifier=identifier)
    wallet.pendRequest(signed)
    client.submitReqs(*wallet.preparePending())
    return checkSubmitted(looper, client, opType, before)
def makePendingTxnsRequest(client, wallet):
    """Prepare all of the wallet's pending sync requests and submit them."""
    wallet.pendSyncRequests()
    client.submitReqs(*wallet.preparePending())
def makeGetNymRequest(client, wallet, nym):
    """Build, sign and submit a GET_NYM operation for *nym*; return the
    submitted request."""
    op = {
        TARGET_NYM: nym,
        TXN_TYPE: GET_NYM,
    }
    signed = wallet.signOp(op)
    # TODO: the pend/prepare/submit dance below is boilerplate
    wallet.pendRequest(signed)
    pending = wallet.preparePending()
    return client.submitReqs(*pending)[0]
def makeAttribRequest(client, wallet, attrib):
    """Add *attrib* to the wallet and submit the resulting pending request."""
    wallet.addAttribute(attrib)
    # TODO: the prepare/submit pattern below is boilerplate
    pending = wallet.preparePending()
    return client.submitReqs(*pending)[0]
def _newWallet(name=None):
    """Create a wallet holding one fresh SimpleSigner identifier; the wallet
    is named after the signer unless *name* is truthy."""
    signer = SimpleSigner()
    wallet = Wallet(name or signer.identifier)
    wallet.addIdentifier(signer=signer)
    return wallet
def addAttributeAndCheck(looper, client, wallet, attrib):
    """Add *attrib* via the wallet, wait until it is written to the ledger
    and return its sequence number."""
    countBefore = wallet.pendingCount
    assert wallet.addAttribute(attrib) == countBefore + 1
    client.submitReqs(*wallet.preparePending())

    def attribWritten():
        assert wallet.getAttribute(attrib).seqNo is not None

    looper.run(eventually(
        attribWritten, retryWait=1,
        timeout=plenumWaits.expectedTransactionExecutionTime(client.totalNodes)))
    return wallet.getAttribute(attrib).seqNo
def addRawAttribute(looper, client, wallet, name, value, dest=None,
                    localName=None):
    """Create a RAW ledger attribute {name: value} owned by the wallet's
    default id and add it via addAttributeAndCheck."""
    attrData = json.dumps({name: value})
    attrib = Attribute(name=localName or name,
                       origin=wallet.defaultId,
                       value=attrData,
                       dest=dest,
                       ledgerStore=LedgerStore.RAW)
    addAttributeAndCheck(looper, client, wallet, attrib)
def checkGetAttr(reqKey, trustAnchor, attrName, attrValue):
    """Assert that the reply for *reqKey* is CONFIRMED and its DATA payload
    carries attrName == attrValue; return the reply."""
    reply, status = trustAnchor.getReply(*reqKey)
    assert reply
    payload = json.loads(reply.get(DATA))
    assert status == "CONFIRMED"
    assert payload is not None and payload.get(attrName) == attrValue
    return reply
def getAttribute(
        looper,
        trustAnchor,
        trustAnchorWallet,
        userIdA,
        attributeName,
        attributeValue):
    """Request *attributeName* of *userIdA* through the trust anchor, wait
    until checkGetAttr confirms it equals *attributeValue*, return the reply.

    Should be renamed to get_attribute_and_check.
    """
    attrib = Attribute(name=attributeName,
                       value=None,
                       dest=userIdA,
                       ledgerStore=LedgerStore.RAW)
    req = trustAnchorWallet.requestAttribute(
        attrib, sender=trustAnchorWallet.defaultId)
    trustAnchor.submitReqs(req)
    execTimeout = waits.expectedTransactionExecutionTime(
        len(trustAnchor.nodeReg))
    return looper.run(eventually(checkGetAttr, req.key, trustAnchor,
                                 attributeName, attributeValue, retryWait=1,
                                 timeout=execTimeout))
def buildStewardClient(looper, tdir, stewardWallet):
    """Create a pool-ledger test client wired to *stewardWallet*, connect it
    and flush the wallet's pending txns; return the client."""
    stewardClient, _ = genTestClient(tmpdir=tdir, usePoolLedger=True)
    stewardClient.registerObserver(stewardWallet.handleIncomingReply)
    looper.add(stewardClient)
    looper.run(stewardClient.ensureConnectedToNodes())
    makePendingTxnsRequest(stewardClient, stewardWallet)
    return stewardClient
# all characters allowed in base58, computed once at import time
base58_alphabet = set(base58.alphabet)
# NOTE(review): the parameter shadows the builtin `str`; renaming it (e.g. to
# `text`) would be cleaner but could break keyword callers.
def check_str_is_base58_compatible(str):
    # empty difference set -> every character is in the base58 alphabet
    return not (set(str) - base58_alphabet)
| {
"content_hash": "1d01b655c866f8c6172da542be46fa64",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 79,
"avg_line_length": 35.33990147783251,
"alnum_prop": 0.6021745190967382,
"repo_name": "keenondrums/sovrin-node",
"id": "aaeaf4143b74e8a33858f913c3b7159596788168",
"size": "14348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sovrin_node/test/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1088655"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "15720"
}
],
"symlink_target": ""
} |
from . import cmake_product
from . import earlyswiftdriver
class CMark(cmake_product.CMakeProduct):
    """Swift build-script product definition for the CMark library."""

    @classmethod
    def is_build_script_impl_product(cls):
        """is_build_script_impl_product -> bool

        Whether this product is produced by build-script-impl.
        """
        return False

    @classmethod
    def is_before_build_script_impl_product(cls):
        """is_before_build_script_impl_product -> bool

        Whether this product is built before any build-script-impl products.
        """
        return True

    # EarlySwiftDriver is the root of the graph, and is the only dependency of
    # this product.
    @classmethod
    def get_dependencies(cls):
        return [earlyswiftdriver.EarlySwiftDriver]

    def should_build(self, host_target):
        """should_build() -> Bool

        Whether or not this product should be built with the given arguments.
        """
        return self.args.build_cmark

    def build(self, host_target):
        """build() -> void

        Perform the build, for a non-build-script-impl product.
        """
        self.cmake_options.define('CMAKE_BUILD_TYPE:STRING',
                                  self.args.cmark_build_variant)
        self.cmake_options.define('CMARK_THREADING', 'ON')

        (platform, arch) = host_target.split('-')

        common_c_flags = ' '.join(self.common_cross_c_flags(platform, arch))
        self.cmake_options.define('CMAKE_C_FLAGS', common_c_flags)
        self.cmake_options.define('CMAKE_CXX_FLAGS', common_c_flags)

        # Idiom fix: str.startswith accepts a tuple of prefixes, replacing
        # the original chain of four `or`-ed startswith calls.
        if host_target.startswith(("macosx", "iphone", "appletv", "watch")):
            toolchain_file = self.generate_darwin_toolchain_file(platform, arch)
            self.cmake_options.define('CMAKE_TOOLCHAIN_FILE:PATH', toolchain_file)
        elif platform == "linux":
            toolchain_file = self.generate_linux_toolchain_file(platform, arch)
            self.cmake_options.define('CMAKE_TOOLCHAIN_FILE:PATH', toolchain_file)

        self.build_with_cmake(["all"], self.args.cmark_build_variant, [])

    def should_test(self, host_target):
        """should_test() -> Bool

        Whether or not this product should be tested with the given arguments.
        """
        # tests cannot run on a cross-compile target
        if self.is_cross_compile_target(host_target):
            return False

        return self.args.test_cmark

    def test(self, host_target):
        """
        Perform the test phase for the product.
        This phase might build and execute the product tests.
        """
        executable_target = 'api_test'
        results_targets = ['test']
        if self.args.cmake_generator == 'Xcode':
            # Xcode generator uses "RUN_TESTS" instead of "test".
            results_targets = ['RUN_TESTS']

        self.test_with_cmake(executable_target, results_targets,
                             self.args.cmark_build_variant, [])

    def should_install(self, host_target):
        """should_install() -> Bool

        Whether or not this product should be installed with the given
        arguments.
        """
        return self.args.install_all

    def install(self, host_target):
        """
        Perform the install phase for the product.
        This phase might copy the artifacts from the previous phases into a
        destination directory.
        """
        self.install_with_cmake(["install"], self.host_install_destdir(host_target))
| {
"content_hash": "b074e59c4384ab065650bb9772d4db0b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 84,
"avg_line_length": 33.99029126213592,
"alnum_prop": 0.61753784632962,
"repo_name": "ahoppen/swift",
"id": "bde199b37e0de49cca889f9e2f0b355d76720b98",
"size": "4008",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "utils/swift_build_support/swift_build_support/products/cmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "45571"
},
{
"name": "C",
"bytes": "5428926"
},
{
"name": "C++",
"bytes": "46840716"
},
{
"name": "CMake",
"bytes": "694118"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57594"
},
{
"name": "LLVM",
"bytes": "74481"
},
{
"name": "Makefile",
"bytes": "2361"
},
{
"name": "Objective-C",
"bytes": "466082"
},
{
"name": "Objective-C++",
"bytes": "159688"
},
{
"name": "Python",
"bytes": "1968205"
},
{
"name": "Roff",
"bytes": "3683"
},
{
"name": "Ruby",
"bytes": "2132"
},
{
"name": "Shell",
"bytes": "214936"
},
{
"name": "Swift",
"bytes": "38740617"
},
{
"name": "Vim Script",
"bytes": "20025"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
import sys
import os
import json
import ConfigParser
import logging
import argparse
import struct
sys.path.insert(0, os.path.abspath(os.getcwd()))
from modulos.Collection import Collection
from modulos.Indexer import Indexer
from modulos.PicklePersist import PicklePersist
from modulos.Postings import SequentialPostings, BinaryPostings
def loadArgParser():
    """Define and evaluate the indexer script's command line."""
    cli = argparse.ArgumentParser(
        description='A script to index a collection of text documents')
    cli.add_argument("corpus_path", help="the path to the corpus to be indexed")
    cli.add_argument("-v", "--verbose", help="increase output verbosity",
                     action="store_true")
    return cli.parse_args()
def loadIni():
    """Read config.ini (next to this script) and return the options of its
    first section as a dict.

    NOTE: uses the Python 2 ConfigParser module name.
    """
    INI_PATH = os.path.dirname(os.path.realpath(__file__)) + "/config.ini"
    Config = ConfigParser.ConfigParser()
    Config.read(INI_PATH)
    logging.info(INI_PATH)
    iniData = {}
    # only the first section of the ini file is consulted
    sections = Config.sections()
    for option in Config.options(sections[0]):
        opValue = Config.get(sections[0], option)
        # ConfigParser.get returns strings; -1 never occurs in practice, so
        # this effectively copies the value through unchanged
        iniData[option] = opValue if opValue != -1 else False;
    return iniData
def loadIndexConfig(iniData):
    """Extract the lexical-analyzer options from the raw ini dictionary.

    Only truthy values are copied over; the two term-size limits are
    coerced to int.
    """
    indexConfig = {}
    for key in ("stopwords", "stem"):
        if iniData.get(key):
            indexConfig[key] = iniData[key]
    for key in ("term_min_size", "term_max_size"):
        if iniData.get(key):
            indexConfig[key] = int(iniData[key])
    return indexConfig
def storeIndexInDisk(indexDir, indexer):
    """Persist the in-memory index under *indexDir*: vocabulary (text),
    postings (binary), per-document max frequencies and metadata.

    NOTE: Python 2 code -- t.encode('UTF-8') yields a byte string here.
    """
    tStr = ""
    vocabularyFile = indexDir + "vocabulary.txt"
    # vocabulary.txt: one "term:id" line per term
    with open(vocabularyFile, "w") as f:
        for t in indexer.vocabulary.content:
            tStr += "%s:%d\n" % (t.encode('UTF-8'), indexer.vocabulary.getId(t))
        f.write(tStr)
    logging.info("Vocabulario guardado en: %s" % vocabularyFile)
    bp = BinaryPostings.create(indexer.postings.getAll(), path=indexDir, title="postings.bin")
    logging.info("Postings guardadas en: %s" % bp.path)
    logging.info("Pointers to postings guardadas en: %s" % bp.storeTermToPointer(path=indexDir, title="postings_pointers.bin"))
    # max_freq_in_docs.bin: little-endian uint32 per document, in doc-id order
    with open(indexDir + "max_freq_in_docs.bin", "wb") as f:
        max_freqs = [indexer.maxFreqInDocs[d] for d in range(0, len(indexer.maxFreqInDocs))]
        f.write(struct.pack('<%sI' % len(max_freqs), *max_freqs))
    logging.info("Max freq per doc guardadas en: " + indexDir + "max_freq_in_docs.bin")
    # metadata.bin: two little-endian uint32s (doc count, vocabulary size)
    with open(indexDir + "metadata.bin", "wb") as f:
        f.write(struct.pack('<I', len(indexer.documents.content)))
        f.write(struct.pack('<I', len(indexer.vocabulary.content)))
    logging.info("Metadata guardada en: " + indexDir + "metadata.bin")
def index(corpusPath):
    """Index the collection found at *corpusPath* and return the Indexer.

    Re-raises OSError (after logging it) when the corpus is unreadable.
    """
    try:
        collection = Collection(corpusPath)
    # Fix: `except OSError, e` is Python-2-only syntax; the `as` form is
    # valid in Python 2.6+ and Python 3.
    except OSError as e:
        logging.error(e)
        raise
    iniData = loadIni()
    # options for the lexical analyzer
    indexConfig = loadIndexConfig(iniData)
    indexer = Indexer(collection)
    indexer.index(indexConfig)
    return indexer
def indexAndSave(corpusPath):
    """Index *corpusPath* and persist the resulting index to disk.

    Bug fix: the original discarded index()'s return value and then
    referenced the undefined names `iniData` and `indexer` (both local to
    index()), raising NameError on every call.
    """
    indexer = index(corpusPath)
    iniData = loadIni()
    # Persisto indice para su recuperacion (persist the index so it can be
    # loaded later; joining with '' guarantees a trailing separator)
    INDEX_DIR = os.path.join(iniData['index_dir'], '')
    if not os.path.exists(INDEX_DIR):
        os.makedirs(INDEX_DIR)
    storeIndexInDisk(INDEX_DIR, indexer)
if __name__ == "__main__":
    args = loadArgParser()
    # -v/--verbose switches the root logger to DEBUG before indexing starts
    if args.verbose:
        logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
    index(args.corpus_path)
| {
"content_hash": "f47e07f598e3fc3db76d65c02949b88d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 124,
"avg_line_length": 34.845360824742265,
"alnum_prop": 0.7281065088757397,
"repo_name": "Juancard/parallel-and-distributed-IR",
"id": "fed55348b00d500df57a4fb81e55eb83d199bcca",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IR_server/IR_python/indexer_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46162"
},
{
"name": "Cuda",
"bytes": "8192"
},
{
"name": "Java",
"bytes": "199834"
},
{
"name": "Makefile",
"bytes": "1437"
},
{
"name": "Python",
"bytes": "49327"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the mp3fm console tool.
# NOTE(review): long_description opens README.rst without closing it, and the
# description string contains typos ("choosen", "followging") -- both are
# runtime values, left untouched here.
setup(
    name='mp3fm',
    version='1.0.3',
    author='Akshit Agarwal',
    author_email='akshit.jiit@gmail.com',
    url='https://github.com/Aki92/mp3fm',
    packages=find_packages(),
    entry_points = {
        'console_scripts': ['mp3fm = mp3fm.mp3fm:main']
    },
    install_requires=['easygui', 'mutagen', 'musicbrainzngs'],
    license='MIT',
    description='''I believe that for Music Lovers its a big problem to \
keep songs organized into folders, so here comes a simple solution to that \
problem. Just run the app from inside the folder which contains songs and it \
will Pack the Songs into folders corresponding to the properties choosen by \
you from the followging options Album(Movie)/Artist/Year/Comments/Title/Duration.''',
    long_description=open('README.rst').read(),
    classifiers=["Environment :: Console", "Topic :: Multimedia"],
)
| {
"content_hash": "b1e3fe6d45f963351661ad04d4c16ce5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 41.208333333333336,
"alnum_prop": 0.6440849342770475,
"repo_name": "Aki92/mp3fm",
"id": "0566e6af42c6b5e5b8989daabbef6a07de95962f",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41976"
},
{
"name": "Shell",
"bytes": "6708"
}
],
"symlink_target": ""
} |
"""
Commands for telling a worker to load tests or run tests.
@since: 12.3
"""
from twisted.protocols.amp import Command, String, Boolean
class Run(Command):
    """
    Run a test.
    """
    # single string argument identifying the test case; the reply carries a
    # boolean success flag
    arguments = [('testCase', String())]
    response = [('success', Boolean())]
class Start(Command):
    """
    Set up the worker process, giving the running directory.
    """
    # the directory the worker should run in; replies with a success flag
    arguments = [('directory', String())]
    response = [('success', Boolean())]
| {
"content_hash": "c67c95b752551980558860db5d89046c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 18.73076923076923,
"alnum_prop": 0.5749486652977412,
"repo_name": "timkrentz/SunTracker",
"id": "775da2cf78e2c1563a82b26562659b479b0bae43",
"size": "561",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/trial/_dist/workercommands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
import json
import random
import re
import itertools
from collections import Counter
from qanta.ingestion.classifier import Classifier
from qanta.util.constants import (
GUESSER_TRAIN_FOLD,
BUZZER_TRAIN_FOLD,
GUESSER_DEV_FOLD,
BUZZER_DEV_FOLD,
GUESSER_TEST_FOLD,
BUZZER_TEST_FOLD,
)
def try_parse_int(text):
    """Return int(text), or None when *text* cannot be parsed as an integer.

    Bug fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; only the conversion errors are caught now.
    """
    try:
        return int(text)
    except (ValueError, TypeError):
        return None
# Maps tournament-name variants/abbreviations to one canonical name; consumed
# by parse_tournament_name after any leading year has been stripped.
CANONICAL_TOURNAMENT_MAP = {
    "EFT": "Early Fall Tournament (EFT)",
    "FKT": "Fall Kickoff Tournament (FKT)",
    "Fall Kickoff Tournament": "Fall Kickoff Tournament (FKT)",
    "LIST": "Ladue Invitational Sprint Tournament (LIST)",
    "LIST (Ladue Invitational Spring Tournament)": "Ladue Invitational Sprint Tournament (LIST)",
    "LIST (Ladue Invitational Spring Tournament) VI": "Ladue Invitational Sprint Tournament (LIST)",
    "LIST III": "Ladue Invitational Sprint Tournament (LIST)",
    "LIST IV": "Ladue Invitational Sprint Tournament (LIST)",
    "Ladue Invitational Spring Tournament": "Ladue Invitational Sprint Tournament (LIST)",
    "Maggie Walker GSAC XIX": "Maggie Walker GSAC",
    "Maggie Walker GSAC XV": "Maggie Walker GSAC",
    "Maggie Walker GSAC XVI": "Maggie Walker GSAC",
    "Maggie Walker GSAC XVIII": "Maggie Walker GSAC",
    "Prison Bowl VIII": "Prison Bowl",
    "Prison Bowl X": "Prison Bowl",
    "Tyrone Slothrop Lit": "Tyrone Slothrop Literature Singles",
    "Terrapin": "Terrapin Invitational Tournament",
    "Terrapin Invitational": "Terrapin Invitational Tournament",
    "Mavis Gallant Memorial": "Mavis Gallant Memorial Tournament (Literature)",
    "Geography Monstrosity 4": "Geography Monstrosity",
    "Geography Monstrosity 2": "Geography Monstrosity",
}
def parse_tournament_name(tournament_name):
splits = tournament_name.split()
maybe_year = try_parse_int(splits[0])
if maybe_year is None:
if tournament_name in CANONICAL_TOURNAMENT_MAP:
return CANONICAL_TOURNAMENT_MAP[tournament_name], None
return tournament_name, None
else:
name = " ".join(splits[1:])
if name in CANONICAL_TOURNAMENT_MAP:
return CANONICAL_TOURNAMENT_MAP[name], maybe_year
else:
return name, maybe_year
# Regex alternatives stripped from the start of a question.  Order matters:
# the alternation tries entries left to right, so e.g. r"[0-9]+[\.:]?" wins
# over "10 points:" when a question starts with a number.
# The two previously non-raw entries with "\." are now raw strings: the
# regex is byte-identical, but this silences Python's invalid-escape
# SyntaxWarning (an error in future versions).
TRASH_PREFIXES = [
    r".*\(Note to moderator:.*\)",
    r"\.",
    r"\?",
    r"\|",
    r"\_",
    r"\)",
    r"[0-9]+[\.:]?",
    "C:",
    r"\[[A-Z/]+\]",
    r"\([A-Z/]+\)",
    r"BONUS\.?",
    "10 pts:",
    "15 pts:",
    "10 points:",
    "15 points:",
    "Round [0-9]+:",
    "BHSAT 2008 Packet #[0-9]+ Packet by Robert, Ian, Danila, and Linna",
    r"Two answers required\.",
    r"The name's the same\.",
    r"TWO ANSWERS REQUIRED\.",
    r"Warning: two answers required\.",
    "NOTE:",
    "WARNING:",
    "MODERATOR NOTE:",
    r"Pencil and paper ready\.",
    r"\([A-Z]+\) Computational - pencil and paper ready\.",
    r"Description acceptable\.",
    r"Pyramidal Math \([0-9]+ Seconds\)",
    r"Physics \([0-9]+ Seconds\)",
    "Chemistry",
    "Nonfiction",
    "Vocabulary",
    "US History",
    "Music",
    "Biology",
    "Art/Architecture",
    "Art/Archictecture",
    "World Literature",
    "Interdisciplinary",
    "British Literature",
    "Religion/Mythology",
    # NOTE(review): the trailing "." here matches any character; presumably
    # r"\." was intended — left as-is to preserve behavior.
    "Tiebreaker [0-9]+.",
    "Pop Culture",
    "US Literature",
    "World History",
    r"Pencil and Paper Ready\.",
    "United States History",
    "United States Literature",
    "Geography/Earth Science/Astronomy",
    "Geography/Astronomy/Earth Science",
    "Extra Tossups",
    "Current Events",
    "Extra Toss-Up #[0-9]+",
    "Toss-Up #[0-9]+",
]
# Anchored alternation: strip at most one junk prefix from the start.
TRASH_PREFIX_PATTERN = "^({})".format("|".join(TRASH_PREFIXES))


def normalize_text(text):
    """Normalize curly quotes to ASCII and strip a known junk prefix.

    :param text: raw question text
    :return: text with smart quotes replaced and any leading trash
        prefix (category labels, moderator notes, point values) removed
    """
    text = text.replace("“", '"').replace("”", '"').replace("’", "'")
    return re.sub(TRASH_PREFIX_PATTERN, "", text).lstrip()
# Module-level singleton used by all parsers below to assign a
# category/subcategory to each question (project-provided class;
# see qanta.ingestion.classifier).
classifier = Classifier()
class QuizdbOrg:
    """Parsers for quizdb.org JSON dumps (tournaments, categories, tossups)."""

    @staticmethod
    def parse_tournaments(path):
        """Parse the tournaments dump into ``{id: {name, year, difficulty}}``.

        :param path: path to the tournaments JSON file
        :raises ValueError: if a year parsed from the tournament name
            disagrees with the record's own ``year`` field
        """
        with open(path) as f:
            quizdb_tournaments = {}
            for r in json.load(f):
                name, year = parse_tournament_name(r["name"])
                if year is not None and r["year"] != year:
                    raise ValueError("Years disagree, that's unexpected")
                quizdb_tournaments[r["id"]] = {
                    "name": name,
                    "year": r["year"],
                    "difficulty": r["difficulty"],
                }
            return quizdb_tournaments

    @staticmethod
    def parse_categories(path):
        """Parse the categories dump into ``{category_id: name}``."""
        with open(path) as f:
            return {r["id"]: r["name"] for r in json.load(f)}

    @staticmethod
    def parse_subcategories(path):
        """Parse subcategories, stripping a leading parent-category word.

        quizdb subcategory names look like "Science Biology"; the parent
        prefix is dropped so the map value is just "Biology".  Names that
        do not start with a known category are kept verbatim.
        """
        categories = [
            "Current Events",
            "Fine Arts",
            "Geography",
            "History",
            "Literature",
            "Mythology",
            "Philosophy",
            "Religion",
            "Science",
            "Social Science",
            "Trash",
        ]
        pattern = f"(?:{'|'.join(categories)}) (.*)"
        with open(path) as f:
            quizdb_subcategory_list = json.load(f)
        quizdb_subcategories = {}
        for r in quizdb_subcategory_list:
            m = re.match(pattern, r["name"])
            quizdb_subcategories[r["id"]] = r["name"] if m is None else m.group(1)
        return quizdb_subcategories

    @staticmethod
    def parse_tossups(qdb_tournaments, qdb_categories, qdb_subcategories, path):
        """Parse tossups into qanta question dicts.

        Questions whose text is the "[missing]" placeholder are dropped;
        ``year`` is -1 when the question has no tournament.  Category and
        subcategory come from the module-level classifier, not from the
        qdb_categories/qdb_subcategories arguments (kept for interface
        compatibility).
        """
        with open(path) as f:
            quizdb_questions = []
            for q in json.load(f):
                # Skip placeholder records before doing any other work.
                if q["text"] == "[missing]":
                    continue
                tournament_id = q["tournament_id"]
                if tournament_id is None:
                    tournament = None
                    difficulty = None
                    year = -1  # sentinel for "unknown year"
                else:
                    t = qdb_tournaments[tournament_id]
                    tournament = t["name"]
                    difficulty = t["difficulty"]
                    year = int(t["year"])
                # The classifier sees question text plus the answer line;
                # compute it once instead of twice.
                clf_input = q["text"] + " ANSWER: " + q["answer"]
                quizdb_questions.append(
                    {
                        "text": normalize_text(q["text"]),
                        "answer": q["answer"],
                        "page": None,
                        "category": classifier.predict_category(clf_input),
                        "subcategory": classifier.predict_subcategory(clf_input),
                        "tournament": tournament,
                        "difficulty": difficulty,
                        "year": year,
                        "proto_id": None,
                        "qdb_id": q["id"],
                        "dataset": "quizdb.org",
                    }
                )
            return quizdb_questions
class Protobowl:
    """Parser for protobowl JSON-lines dumps."""

    @staticmethod
    def parse_tossups(path):
        """Read one JSON record per line and convert to qanta question dicts.

        Records whose question text is the "[missing]" placeholder are
        skipped.
        """
        with open(path) as f:
            records = [json.loads(line) for line in f]
        questions = []
        for record in records:
            raw_text = record["question"]
            if raw_text == "[missing]":
                continue
            answer = record["answer"]
            questions.append(
                {
                    "text": normalize_text(raw_text),
                    "answer": answer,
                    "page": None,
                    "category": classifier.predict_category(raw_text + " ANSWER: " + answer),
                    "subcategory": classifier.predict_subcategory(raw_text + " ANSWER: " + answer),
                    "tournament": record["tournament"],
                    "difficulty": record["difficulty"],
                    "year": record["year"],
                    "proto_id": record["_id"]["$oid"],
                    "qdb_id": None,
                    "dataset": "protobowl",
                }
            )
        return questions
def merge_datasets(protobowl_questions, quizdb_questions):
    """Merge protobowl and quizdb questions without duplicating tournaments.

    For each (tournament, year) pair present in either dataset exactly one
    source dataset is selected:

    1) Count questions per (tournament, year) in each dataset
    2) Prefer protobowl when it has more questions, or when the two counts
       are within 10% of the larger count; otherwise pick quizdb
    3) Emit only questions whose dataset matches the selection, tagging
       each with a ``qanta_id`` equal to its index in the concatenation

    :param protobowl_questions: Parsed protobowl questions
    :param quizdb_questions: Parsed quizdb questions
    :return: merged list of question dicts (mutated in place with qanta_id)
    """
    proto_tournament_years = Counter(
        (r["tournament"], r["year"])
        for r in protobowl_questions
        if r["tournament"] is not None
    )
    qdb_tournament_years = Counter(
        (r["tournament"], r["year"])
        for r in quizdb_questions
        if r["tournament"] is not None
    )

    # Map each (tournament, year) to (label, chosen_count, other_count).
    selected_tournaments = {}
    possible_tournaments = set(qdb_tournament_years) | set(proto_tournament_years)
    for ty in possible_tournaments:
        in_proto = ty in proto_tournament_years
        in_qdb = ty in qdb_tournament_years
        if in_proto and in_qdb:
            n_proto = proto_tournament_years[ty]
            n_qdb = qdb_tournament_years[ty]
            n_max = max(n_proto, n_qdb)
            n_min = min(n_proto, n_qdb)
            p_10 = 0.1 * n_max
            if n_proto > n_qdb:
                selected_tournaments[ty] = ("proto_choose", n_proto, n_qdb)
            elif (n_max - n_min) <= p_10:
                # Counts are close: prefer protobowl.  Fixed: the third
                # element previously recorded n_proto twice instead of the
                # other dataset's count.
                selected_tournaments[ty] = ("proto_close", n_proto, n_qdb)
            else:
                selected_tournaments[ty] = ("qdb_choose", n_qdb, n_proto)
        elif in_proto:
            selected_tournaments[ty] = ("proto_default", proto_tournament_years[ty], 0)
        else:
            # ty comes from the union of the two key sets, so this is the
            # only remaining case (the old unreachable raise was removed).
            selected_tournaments[ty] = ("qdb_default", qdb_tournament_years[ty], 0)

    questions = []
    for i, q in enumerate(itertools.chain(protobowl_questions, quizdb_questions)):
        ty = (q["tournament"], q["year"])
        if ty not in selected_tournaments:
            continue  # questions without a recognized tournament are dropped
        source = selected_tournaments[ty][0]
        if (source.startswith("proto") and q["dataset"] == "protobowl") or (
            source.startswith("qdb") and q["dataset"] == "quizdb.org"
        ):
            q["qanta_id"] = i
            questions.append(q)
    return questions
# Tournaments whose questions are held out for evaluation folds.
TEST_TOURNAMENTS = {"ACF Regionals", "PACE NSC", "NASAT", "ACF Nationals", "ACF Fall"}
GUESSTEST_YEARS = {2017, 2018}  # These years do not have gameplay data at all
BUZZTEST_YEARS = {2016}  # held out as the buzzer test set
DEV_YEARS = {2015}  # split randomly between guesser and buzzer dev folds
def assign_folds_(
    qanta_questions, question_player_counts, random_seed=0, guessbuzz_frac=0.8
):
    """
    Assign a fold to every question in place (trailing underscore marks
    the in-place mutation convention).

    Note that q['proto_id'] in question_player_counts being True implies the dataset source is protobowl.

    :param qanta_questions: question dicts; mutated (adds 'gameplay' and 'fold')
    :param question_player_counts: mapping keyed by protobowl id for
        questions that have gameplay (buzz) records
    :param random_seed: seeds the module-level RNG so assignment is reproducible
    :param guessbuzz_frac: fraction of training questions sent to the guesser fold
    """
    random.seed(random_seed)
    for q in qanta_questions:
        # gameplay == True iff we have player records for this question
        if q["proto_id"] in question_player_counts:
            q["gameplay"] = True
        else:
            q["gameplay"] = False
        is_test_tournament = q["tournament"] in TEST_TOURNAMENTS
        if is_test_tournament and q["year"] in GUESSTEST_YEARS:
            q["fold"] = GUESSER_TEST_FOLD
        elif is_test_tournament and q["year"] in BUZZTEST_YEARS:
            q["fold"] = BUZZER_TEST_FOLD
        elif is_test_tournament and q["year"] in DEV_YEARS:
            # Split randomly between guesser and buzzer to preserve data distribution
            if random.random() < 0.5:
                q["fold"] = BUZZER_DEV_FOLD
            else:
                q["fold"] = GUESSER_DEV_FOLD
        else:
            # For Training we don't try to preserve as much since more data is more important
            if random.random() < guessbuzz_frac:
                q["fold"] = GUESSER_TRAIN_FOLD
            else:
                # assigning questions to buzzer train that have no gameplay is useless
                if q["proto_id"] in question_player_counts:
                    q["fold"] = BUZZER_TRAIN_FOLD
                else:
                    q["fold"] = GUESSER_TRAIN_FOLD
        # Defensive check: every branch above assigns a fold, so this
        # should never fire.
        if "fold" not in q:
            raise ValueError("Cannot leave a question without an assigned fold")
| {
"content_hash": "98742cf9dc4896e4a754669071d70a85",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 113,
"avg_line_length": 35.939828080229226,
"alnum_prop": 0.5455632623774217,
"repo_name": "miyyer/qb",
"id": "112fd2463043c23d87d1df0b79c49779a8197462",
"size": "12549",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qanta/ingestion/normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "10277"
},
{
"name": "Python",
"bytes": "236223"
},
{
"name": "R",
"bytes": "1095"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import with_statement
import anyjson
import warnings
from celery import registry
from celery.app import app_or_default
from celery.task import Task
from celery.task.sets import subtask, TaskSet
from celery.tests.utils import unittest
from celery.tests.compat import catch_warnings
class MockTask(Task):
    """Task double: run() adds its args; apply/apply_async echo their call."""
    name = "tasks.add"

    def run(self, x, y, **kwargs):
        return x + y

    @classmethod
    def apply_async(cls, args, kwargs, **options):
        # Return the invocation instead of enqueueing, so tests can
        # assert on exactly what would have been sent.
        return (args, kwargs, options)

    @classmethod
    def apply(cls, args, kwargs, **options):
        # Same echo behavior for the eager (local) code path.
        return (args, kwargs, options)
class test_subtask(unittest.TestCase):
    """Tests for subtask construction, serialization and argument merging."""

    def test_behaves_like_type(self):
        """subtask() applied to an existing subtask returns an equal mapping."""
        s = subtask("tasks.add", (2, 2), {"cache": True},
                    {"routing_key": "CPU-bound"})
        self.assertDictEqual(subtask(s), s)

    def test_task_argument_can_be_task_cls(self):
        """Passing a Task class is equivalent to passing its registered name."""
        s = subtask(MockTask, (2, 2))
        self.assertEqual(s.task, MockTask.name)

    def test_apply_async(self):
        """apply_async forwards stored args/kwargs/options unchanged."""
        s = MockTask.subtask((2, 2), {"cache": True},
                             {"routing_key": "CPU-bound"})
        args, kwargs, options = s.apply_async()
        self.assertTupleEqual(args, (2, 2))
        self.assertDictEqual(kwargs, {"cache": True})
        self.assertDictEqual(options, {"routing_key": "CPU-bound"})

    def test_delay_argmerge(self):
        """delay() prepends call-time args and overrides stored kwargs."""
        s = MockTask.subtask((2, ), {"cache": True},
                             {"routing_key": "CPU-bound"})
        args, kwargs, options = s.delay(10, cache=False, other="foo")
        self.assertTupleEqual(args, (10, 2))
        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
        self.assertDictEqual(options, {"routing_key": "CPU-bound"})

    def test_apply_async_argmerge(self):
        """apply_async merges call-time args/kwargs/options over stored ones."""
        s = MockTask.subtask((2, ), {"cache": True},
                             {"routing_key": "CPU-bound"})
        args, kwargs, options = s.apply_async((10, ),
                                              {"cache": False, "other": "foo"},
                                              routing_key="IO-bound",
                                              exchange="fast")
        self.assertTupleEqual(args, (10, 2))
        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
        self.assertDictEqual(options, {"routing_key": "IO-bound",
                                       "exchange": "fast"})

    def test_apply_argmerge(self):
        """apply (eager path) merges arguments the same way as apply_async."""
        s = MockTask.subtask((2, ), {"cache": True},
                             {"routing_key": "CPU-bound"})
        args, kwargs, options = s.apply((10, ),
                                        {"cache": False, "other": "foo"},
                                        routing_key="IO-bound",
                                        exchange="fast")
        self.assertTupleEqual(args, (10, 2))
        self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
        self.assertDictEqual(options, {"routing_key": "IO-bound",
                                       "exchange": "fast"})

    def test_is_JSON_serializable(self):
        """A subtask round-trips through JSON serialization."""
        s = MockTask.subtask((2, ), {"cache": True},
                             {"routing_key": "CPU-bound"})
        s.args = list(s.args)  # tuples are not preserved
                               # but this doesn't matter.
        self.assertEqual(s,
                         subtask(anyjson.deserialize(
                            anyjson.serialize(s))))

    def test_repr(self):
        """repr() shows positional args and keyword args."""
        s = MockTask.subtask((2, ), {"cache": True})
        self.assertIn("2", repr(s))
        self.assertIn("cache=True", repr(s))

    def test_reduce(self):
        """__reduce__ (pickle protocol) reconstructs an equal subtask."""
        s = MockTask.subtask((2, ), {"cache": True})
        cls, args, _ = s.__reduce__()
        self.assertDictEqual(dict(cls(*args)), dict(s))
class test_TaskSet(unittest.TestCase):
    """Tests for TaskSet construction, deprecation warnings and dispatch."""

    def test_interface__compat(self):
        """The old TaskSet(task, arglist) form works but warns as deprecated."""
        warnings.resetwarnings()
        with catch_warnings(record=True) as log:
            ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
            self.assertListEqual(ts.tasks,
                                 [MockTask.subtask((i, i))
                                    for i in (2, 4, 8)])
            self.assertIn("Using this invocation of TaskSet is deprecated",
                          log[0].message.args[0])
            # Each deprecated accessor should emit its own warning.
            log[:] = []
            self.assertEqual(ts.task, registry.tasks[MockTask.name])
            self.assertTrue(log)
            self.assertIn("TaskSet.task is deprecated",
                          log[0].message.args[0])
            log[:] = []
            self.assertEqual(ts.task_name, MockTask.name)
            self.assertTrue(log)
            self.assertIn("TaskSet.task_name is deprecated",
                          log[0].message.args[0])

    def test_task_arg_can_be_iterable__compat(self):
        """The modern form accepts an iterable of subtasks directly."""
        ts = TaskSet([MockTask.subtask((i, i))
                        for i in (2, 4, 8)])
        self.assertEqual(len(ts), 3)

    def test_respects_ALWAYS_EAGER(self):
        """apply_async falls back to apply() when CELERY_ALWAYS_EAGER is set."""
        app = app_or_default()

        class MockTaskSet(TaskSet):
            applied = 0  # counts apply() invocations

            def apply(self, *args, **kwargs):
                self.applied += 1

        ts = MockTaskSet([MockTask.subtask((i, i))
                            for i in (2, 4, 8)])
        app.conf.CELERY_ALWAYS_EAGER = True
        try:
            ts.apply_async()
        finally:
            # Restore the global flag even if the call raises.
            app.conf.CELERY_ALWAYS_EAGER = False
        self.assertEqual(ts.applied, 1)

    def test_apply_async(self):
        """apply_async dispatches once per contained subtask."""
        applied = [0]  # mutable cell so the inner class can count calls

        class mocksubtask(subtask):

            def apply_async(self, *args, **kwargs):
                applied[0] += 1

        ts = TaskSet([mocksubtask(MockTask, (i, i))
                        for i in (2, 4, 8)])
        ts.apply_async()
        self.assertEqual(applied[0], 3)

        class Publisher(object):
            """Minimal publisher stub accepted by apply_async."""

            def send(self, *args, **kwargs):
                pass

        ts.apply_async(publisher=Publisher())

    def test_apply(self):
        """apply (eager path) also dispatches once per contained subtask."""
        applied = [0]

        class mocksubtask(subtask):

            def apply(self, *args, **kwargs):
                applied[0] += 1

        ts = TaskSet([mocksubtask(MockTask, (i, i))
                        for i in (2, 4, 8)])
        ts.apply()
        self.assertEqual(applied[0], 3)
| {
"content_hash": "3ec404fbfd0ae36a5b2e8efac5f8ad18",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 34.240437158469945,
"alnum_prop": 0.5134056814554739,
"repo_name": "softak/webfaction_demo",
"id": "ffb35ba7e02ed8722c0a5342a1326f595480fb3a",
"size": "6266",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/celery/tests/test_task/test_task_sets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
from urllib.parse import urljoin, urlparse
from ...app import models
from ...app.types import AppExtensionTarget
from ...core.jwt import (
create_access_token_for_app,
create_access_token_for_app_extension,
)
from ..core.utils import from_global_id_or_error
from .enums import AppTypeEnum
def resolve_apps_installations(info):
    """Return every app installation record, regardless of status."""
    installations = models.AppInstallation.objects.all()
    return installations
def resolve_apps(info):
    """Return every registered app."""
    apps = models.App.objects.all()
    return apps
def resolve_access_token_for_app(info, root):
    """Issue an access token for a third-party app; staff requesters only."""
    if root.type != AppTypeEnum.THIRDPARTY.value:
        return None
    requester = info.context.user
    if requester and requester.is_staff:
        return create_access_token_for_app(root, requester)
    return None
def resolve_access_token_for_app_extension(info, root, app):
    """Issue an extension-scoped token when the user holds every permission
    the extension requires; otherwise return None."""
    user = info.context.user
    if not user:
        return None
    granted = root.permissions.all()
    if not set(granted).issubset(user.effective_permissions):
        return None
    return create_access_token_for_app_extension(
        app_extension=root, permissions=granted, user=user, app=app
    )
def resolve_app(_info, id):
    """Resolve an App node from a global ID; None for empty or unknown ids."""
    if not id:
        return None
    _, db_id = from_global_id_or_error(id, "App")
    return models.App.objects.filter(id=db_id).first()
def resolve_app_extensions(_info):
    """Return extensions whose owning app is active."""
    active_extensions = models.AppExtension.objects.filter(app__is_active=True)
    return active_extensions
def resolve_app_extension_url(root):
    """Return an extension url.

    The relative url is stitched onto app_url only when all three hold:
    - url starts with "/"
    - target == "POPUP"
    - app_url is defined
    """
    target = root.get("target", AppExtensionTarget.POPUP)
    app_url = root["app_url"]
    url = root["url"]
    should_stitch = (
        url.startswith("/") and bool(app_url) and target == AppExtensionTarget.POPUP
    )
    if not should_stitch:
        return url
    base = urlparse(app_url)
    stitched_path = urljoin(base.path, url[1:])
    return base._replace(path=stitched_path).geturl()
| {
"content_hash": "fce025d1afa4c56c25451691c5aab59a",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 85,
"avg_line_length": 28.67142857142857,
"alnum_prop": 0.6796213253612357,
"repo_name": "mociepka/saleor",
"id": "b4e5decf4fad148c11afc51887c8f53cea3837db",
"size": "2007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/app/resolvers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""Cloud browser template tags."""
import os
from django import template
from django.template import TemplateSyntaxError, Node
from django.template.defaultfilters import stringfilter
from cloud_browser.app_settings import settings
# Tag/filter registry for this template-tag module.
register = template.Library()  # pylint: disable=C0103
@register.filter
@stringfilter
def truncatechars(value, num, end_text="..."):
    """Truncate string on character boundary.

    .. note::
        Django ticket `5025 <http://code.djangoproject.com/ticket/5025>`_ has a
        patch for a more extensible and robust truncate characters tag filter.

    Example::

        {{ my_variable|truncatechars:22 }}

    :param value: Value to truncate.
    :type value: ``string``
    :param num: Number of characters to trim to, including ``end_text``.
    :type num: ``int``
    :param end_text: Suffix appended when truncation happens.
    """
    length = None
    try:
        length = int(num)
    except ValueError:
        # Non-numeric argument: return the value unchanged below.
        pass

    if length is not None and len(value) > length:
        # Clamp at zero so a length shorter than end_text cannot produce a
        # negative slice (which previously *lengthened* the string).
        return value[:max(0, length - len(end_text))] + end_text
    return value
truncatechars.is_safe = True  # pylint: disable=W0612
@register.tag
def cloud_browser_media_url(_, token):
    """Get base media URL for application static media.

    Correctly handles whether or not the settings variable
    ``CLOUD_BROWSER_STATIC_MEDIA_DIR`` is set and served.

    For example::

        <link rel="stylesheet" type="text/css"
            href="{% cloud_browser_media_url "css/cloud-browser.css" %}" />
    """
    parts = token.split_contents()
    if len(parts) != 2:
        raise TemplateSyntaxError("'%s' takes one argument" % parts[0])
    return MediaUrlNode(parts[1])
class MediaUrlNode(Node):
    """Media URL node."""

    #: Static application media URL (or ``None``).
    static_media_url = settings.app_media_url

    def __init__(self, rel_path):
        """Initializer."""
        super(MediaUrlNode, self).__init__()
        # Normalize: drop leading slashes and surrounding quote characters.
        self.rel_path = rel_path.lstrip('/').strip("'").strip('"')

    def render(self, context):
        """Render."""
        from django.core.urlresolvers import reverse

        if self.static_media_url is None:
            # Django-served media: route through the named media view.
            return reverse("cloud_browser_media",
                           args=[self.rel_path],
                           current_app='cloud_browser')
        # Externally-served static media: join onto the configured base URL.
        return os.path.join(self.static_media_url, self.rel_path)
| {
"content_hash": "b26a62d078d2eba54ccc1892648cea68",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 27.886363636363637,
"alnum_prop": 0.623879380603097,
"repo_name": "lantip/aws-filemanager",
"id": "5b7377d614bf487db2bda37384dd6c491e631770",
"size": "2454",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloud_browser/templatetags/cloud_browser_extras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4449"
},
{
"name": "HTML",
"bytes": "14401"
},
{
"name": "JavaScript",
"bytes": "5794"
},
{
"name": "Python",
"bytes": "133825"
}
],
"symlink_target": ""
} |
from itertools import product
class Interval:
    """Generate intervals from lower to upper of size delta; the last
    interval is truncated at upper when delta does not divide evenly.

    Works with any types supporting ``+``, ``*`` and comparison, e.g.::

        Interval(date(2010, 1, 1), date(2019, 6, 1),
                 relativedelta(years=1)).expand()

    :raises ValueError: if lower is not strictly below upper
    """

    def __init__(self, lower, upper, delta):
        if lower >= upper:
            raise ValueError('lower must be strictly lower than upper')

        self.lower = lower
        self.upper = upper
        self.delta = delta

    def expand(self):
        """Return a list of (start, end) tuples covering [lower, upper]."""
        tuples = []
        i = 0
        # The previous version pre-assigned cursor = self.lower, which was
        # dead code (always overwritten before use) — removed.
        while True:
            start = self.lower + i * self.delta
            end = self.lower + (i + 1) * self.delta
            if end < self.upper:
                tuples.append((start, end))
                i += 1
            else:
                # Final (possibly shorter) interval, clipped to upper.
                tuples.append((start, self.upper))
                break
        return tuples

    def __repr__(self):
        return ('Interval from {} to {} with delta {}'
                .format(self.lower, self.upper, self.delta))


class ParamGrid:
    """Generate parameter grids from lists and :class:`Interval` values.

    >>> pg = ParamGrid({'a': [1, 2, 3], 'b': [2, 4, 6]})
    >>> list(pg.zip())
    [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}, {'a': 3, 'b': 6}]
    """

    def __init__(self, grid):
        # Expand Interval values into explicit (start, end) tuples up front
        # so zip()/product() only ever deal with plain sequences.
        self.expanded = {
            k: (v.expand() if isinstance(v, Interval) else v)
            for k, v in grid.items()
        }

    def zip(self):
        """Yield dicts pairing the i-th value of every parameter.

        :raises ValueError: if the parameters have differing lengths
        """
        lengths = set(len(v) for v in self.expanded.values())
        if len(lengths) != 1:
            raise ValueError('All parameters should have the same length')

        length = list(lengths)[0]
        for i in range(length):
            yield {k: v[i] for k, v in self.expanded.items()}

    def product(self):
        """Yield dicts for the cartesian product of all parameter values."""
        keys = self.expanded.keys()
        for combo in product(*self.expanded.values()):
            yield dict(zip(keys, combo))
| {
"content_hash": "0d356ede7aa675167dc106e39f41ec01",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 74,
"avg_line_length": 25.977777777777778,
"alnum_prop": 0.5162532078699743,
"repo_name": "edublancas/dstools",
"id": "cf9fbd3431cc6b54c91737d20de77762cf3c3f6b",
"size": "2338",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/dstools/pipeline/util/param_grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21898"
},
{
"name": "Jupyter Notebook",
"bytes": "1370"
},
{
"name": "Python",
"bytes": "260295"
}
],
"symlink_target": ""
} |
import argparse
import redis
if __name__ == '__main__':
| {
"content_hash": "dcc568cad250f1ded6ad25b1729ae476",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 12.4,
"alnum_prop": 0.5645161290322581,
"repo_name": "jbfavre/dynomite",
"id": "8590ba11fac4d7eb3a080f1a5e126bcd9fb5ef5f",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/load.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1041946"
},
{
"name": "C++",
"bytes": "15541"
},
{
"name": "JavaScript",
"bytes": "1145"
},
{
"name": "M4",
"bytes": "6129"
},
{
"name": "Makefile",
"bytes": "10631"
},
{
"name": "Python",
"bytes": "30323"
},
{
"name": "Shell",
"bytes": "82008"
}
],
"symlink_target": ""
} |
'''
This script is a check for lookup at disks consumption
'''
import os
import sys
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"

# Default thresholds for used-disk percentage.
DEFAULT_WARNING = '75%'
DEFAULT_CRITICAL = '90%'
# Optional list of mount point prefixes to restrict the check to
# (populated from --mount-points in __main__).
MOUNTS = None

# Unit name -> power of 1024 used by convert_to().
UNITS = {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4}


def convert_to(unit, value):
    """Convert a raw byte count to `unit` (B/KB/MB/GB/TB), rounded.

    Unknown units are treated as bytes (power 0).
    """
    exponent = UNITS.get(unit, 0)
    return round(float(value) / (1024 ** exponent), exponent)
def get_df(client):
    """Run `df` over SSH and parse it into {mount_point: usage dict}.

    Skips pseudo filesystems (tmpfs/devtmpfs/iso9660) and, when the
    module-level MOUNTS whitelist is set, any mount point that does not
    start with one of its prefixes.  Closes the client before returning.

    Sample output parsed here::

        Filesystem Type 1K-blocks     Used  Available Use% Mounted on
        /dev/sda2  ext3  28834744 21802888    5567132  80% /
        tmpfs      tmpfs   412972     1040     411932   1% /run
    """
    # Force the C locale so column layout and headers stay predictable.
    stdin, stdout, stderr = client.exec_command('export LC_LANG=C && unset LANG && df -l -T -k -P')

    dfs = {}
    for raw in stdout:
        raw = raw.strip()
        # Skip blanks and the header row.
        if not raw or raw.startswith('Filesystem'):
            continue
        # Collapse repeated spaces by keeping only non-empty fields.
        fields = [part for part in raw.split(' ') if part]
        # Pseudo filesystems are not interesting for capacity checks.
        if fields[1] in ['tmpfs', 'devtmpfs', 'iso9660']:
            continue
        # Honor an explicit mount point whitelist when one was given.
        if MOUNTS and not any(fields[6].startswith(mnt) for mnt in MOUNTS):
            continue
        mounted = ' '.join(fields[6:])
        dfs[mounted] = {
            'fs': fields[0],
            'size': int(fields[2]) * 1024,   # df -k reports KiB
            'used': int(fields[3]) * 1024,
            'avail': int(fields[4]) * 1024,
            'used_pct': int(fields[5][:-1]),  # strip trailing '%'
        }

    # Before return, close the client
    client.close()
    return dfs
# Base SSH/connection options come from the shared schecks helper; the
# options below are specific to this disk check.
parser = schecks.get_parser()
## Specific options
# NOTE(review): the -w/-c help strings say "physical used memory" but this
# check measures disk usage — likely copy-pasted from the memory check.
parser.add_option('-w', '--warning',
                  dest="warning",
                  help='Warning value for physical used memory. In percent. Default : 75%')
parser.add_option('-c', '--critical',
                  dest="critical",
                  help='Critical value for physical used memory. In percent. Must be '
                  'superior to warning value. Default : 90%')
parser.add_option('-m', '--mount-points',
                  dest="mounts",
                  help='comma separated list of mountpoints to check. Default all mount '
                  'points except of tmpfs types')
parser.add_option('-U', '--unit',
                  dest="unit", help='Unit of Disk Space. B, KB, GB, TB. Default : B')
# NOTE: Python 2 only (print statements, dict.iteritems below).
if __name__ == '__main__':
    # Ok first job : parse args
    opts, args = parser.parse_args()
    if opts.mounts:
        mounts = opts.mounts.split(',')
        MOUNTS=mounts
    # Try to get numeric warning/critical values
    s_warning = opts.warning or DEFAULT_WARNING
    s_critical = opts.critical or DEFAULT_CRITICAL
    warning, critical = schecks.get_warn_crit(s_warning, s_critical)
    # Get Unit
    s_unit = opts.unit or 'B'
    # Ok now got an object that link to our destination
    client = schecks.get_client(opts)
    ## And get real data
    dfs = get_df(client)
    # Maybe we failed at getting data
    if not dfs:
        print "Error : cannot fetch disks values from host"
        sys.exit(2)
    perfdata = ''
    status = 0 # all is green until it is no more ok :)
    bad_volumes = []
    for (mount, df) in dfs.iteritems():
        size = convert_to(s_unit,df['size'])
        used = convert_to(s_unit,df['used'])
        used_pct = df['used_pct']
        # Let first dump the perfdata
        _size_warn = convert_to(s_unit,df['size'] * float(warning)/100)
        _size_crit = convert_to(s_unit,df['size'] * float(critical)/100)
        perfdata += '"%s_used_pct"=%s%%;%s%%;%s%%;0%%;100%% "%s_used"=%s%s;%s;%s;0;%s ' % (mount, used_pct, warning, critical, mount, used, s_unit, _size_warn, _size_crit, size)
        # And compare to limits
        if used_pct >= critical:
            status = 2
            bad_volumes.append( (mount, used_pct) )
        # NOTE(review): because of the status == 0 guard, a warning-level
        # volume found after a critical one is never added to bad_volumes —
        # confirm whether that is intentional.
        if used_pct >= warning and status == 0:
            status = 1
            bad_volumes.append( (mount, used_pct) )
    # Exit with Nagios-style codes: 0 OK, 1 warning, 2 critical.
    if status == 0:
        print "Ok: all disks are in the limits | %s" % (perfdata)
        sys.exit(0)
    if status == 1:
        print "Warning: some disks are not good : %s | %s" % (','.join( ["%s:%s%%" % (mount, used_pct) for (mount, used_pct) in bad_volumes]), perfdata)
        sys.exit(1)
    if status == 2:
        print "Critical: some disks are not good : %s | %s" % (','.join( ["%s:%s%%" % (mount, used_pct) for (mount, used_pct) in bad_volumes]), perfdata)
        sys.exit(2)
| {
"content_hash": "e4cee59cdc3c7b4b72ae5a0bccc5cb36",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 177,
"avg_line_length": 33.24277456647399,
"alnum_prop": 0.5470352982090071,
"repo_name": "ovh/check-linux-by-ssh",
"id": "147812f5bfc2a5c62c9f90cebd09cafc676d9d92",
"size": "6947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_disks_by_ssh.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125504"
}
],
"symlink_target": ""
} |
"""Class that computes the Wasserstein distance in tensorflow.
The implementation follows Algorithm 2 in [Genevay Aude, Marco Cuturi,
Gabriel Peyre, Francis Bach, "Stochastic Optimization for Large-scale
Optimal Transport", NIPS 2016], which compares a distribution to a
fixed set of samples. Internally, base distances are recomputed a lot.
To just compute the Wasserstein distance between to sets of points,
don't use this code, just do a bipartitle matching.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Wasserstein(object):
"""Class to hold (ref to) data and compute Wasserstein distance."""
def __init__(self, source_gen, target_gen, basedist=None):
"""Inits Wasserstein with source and target data."""
self.source_gen = source_gen
self.source_bs = source_gen.bs
self.target_gen = target_gen
self.target_bs = target_gen.bs
self.gradbs = self.source_bs # number of source sample to compute gradient
if basedist is None:
basedist = self.l2dist
self.basedist = basedist
def add_summary_montage(self, images, name, num=9):
vis_images = tf.split(images[:num], num_or_size_splits=num, axis=0)
vis_images = tf.concat(vis_images, axis=2)
tf.summary.image(name, vis_images)
return vis_images
def add_summary_images(self, num=9):
"""Visualize source images and nearest neighbors from target."""
source_ims = self.source_gen.get_batch(bs=num, reuse=True)
vis_images = self.add_summary_montage(source_ims, 'source_ims', num)
target_ims = self.target_gen.get_batch()
_ = self.add_summary_montage(target_ims, 'target_ims', num)
c_xy = self.basedist(source_ims, target_ims) # pairwise cost
idx = tf.argmin(c_xy, axis=1) # find nearest neighbors
matches = tf.gather(target_ims, idx)
vis_matches = self.add_summary_montage(matches, 'neighbors_ims', num)
vis_both = tf.concat([vis_images, vis_matches], axis=1)
tf.summary.image('matches_ims', vis_both)
return
def l2dist(self, source, target):
"""Computes pairwise Euclidean distances in tensorflow."""
def flatten_batch(x):
dim = tf.reduce_prod(tf.shape(x)[1:])
return tf.reshape(x, [-1, dim])
def scale_batch(x):
dim = tf.reduce_prod(tf.shape(x)[1:])
return x/tf.sqrt(tf.cast(dim, tf.float32))
def prepare_batch(x):
return scale_batch(flatten_batch(x))
target_flat = prepare_batch(target) # shape: [bs, nt]
target_sqnorms = tf.reduce_sum(tf.square(target_flat), axis=1, keep_dims=True)
target_sqnorms_t = tf.transpose(target_sqnorms)
source_flat = prepare_batch(source) # shape: [bs, ns]
source_sqnorms = tf.reduce_sum(tf.square(source_flat), axis=1, keep_dims=True)
dotprod = tf.matmul(source_flat, target_flat, transpose_b=True) # [ns, nt]
sqdist = source_sqnorms - 2*dotprod + target_sqnorms_t
dist = tf.sqrt(tf.nn.relu(sqdist)) # potential tiny negatives are suppressed
return dist # shape: [ns, nt]
def grad_hbar(self, v, gradbs, reuse=True):
"""Compute gradient of hbar function for Wasserstein iteration."""
source_ims = self.source_gen.get_batch(bs=gradbs, reuse=reuse)
target_data = self.target_gen.get_batch()
c_xy = self.basedist(source_ims, target_data)
c_xy -= v # [gradbs, trnsize]
idx = tf.argmin(c_xy, axis=1) # [1] (index of subgradient)
target_bs = self.target_bs
xi_ij = tf.one_hot(idx, target_bs) # find matches, [gradbs, trnsize]
xi_ij = tf.reduce_mean(xi_ij, axis=0, keep_dims=True) # [1, trnsize]
grad = 1./target_bs - xi_ij # output: [1, trnsize]
return grad
def hbar(self, v, reuse=True):
"""Compute value of hbar function for Wasserstein iteration."""
source_ims = self.source_gen.get_batch(bs=None, reuse=reuse)
target_data = self.target_gen.get_batch()
c_xy = self.basedist(source_ims, target_data)
c_avg = tf.reduce_mean(c_xy)
c_xy -= c_avg
c_xy -= v
c_xy_min = tf.reduce_min(c_xy, axis=1) # min_y[ c(x, y) - v(y) ]
c_xy_min = tf.reduce_mean(c_xy_min) # expectation wrt x
return tf.reduce_mean(v, axis=1) + c_xy_min + c_avg # avg wrt y
def k_step(self, k, v, vt, c, reuse=True):
"""Perform one update step of Wasserstein computation."""
grad_h = self.grad_hbar(vt, gradbs=self.gradbs, reuse=reuse)
vt = tf.assign_add(vt, c/tf.sqrt(k)*grad_h, name='vt_assign_add')
v = ((k-1.)*v + vt)/k
return k+1, v, vt, c
    def dist(self, C=.1, nsteps=10, reset=False):
        """Compute Wasserstein distance (Alg.2 in [Genevay etal, NIPS'16]).

        :param C: step-size constant for the dual updates
        :param nsteps: number of dual ascent iterations (tf.while_loop)
        :param reset: if True, zero the dual variables on every graph
            evaluation (for randomly re-sampled target data); otherwise
            warm-start from the previous call
        :returns: scalar tensor estimate of the distance
        """
        target_bs = self.target_bs
        # Dual variables: vtilde is the SGD iterate, v its running average.
        vtilde = tf.Variable(tf.zeros([1, target_bs]), name='vtilde')
        v = tf.Variable(tf.zeros([1, target_bs]), name='v')
        k = tf.Variable(1., name='k')
        k = k.assign(1.)  # restart averaging from 1 in each call
        if reset:  # used for randomly sampled target data, otherwise warmstart
            v = v.assign(tf.zeros([1, target_bs]))  # reset every time graph is evaluated
            vtilde = vtilde.assign(tf.zeros([1, target_bs]))
        # (unrolled) optimization loop. first iteration, create variables
        k, v, vtilde, C = self.k_step(k, v, vtilde, C, reuse=False)
        # (unrolled) optimization loop. other iterations, reuse variables
        k, v, vtilde, C = tf.while_loop(cond=lambda k, *_: k < nsteps,
                                        body=self.k_step,
                                        loop_vars=[k, v, vtilde, C])
        v = tf.stop_gradient(v)  # only transmit gradient through cost
        val = self.hbar(v)
        return tf.reduce_mean(val)
| {
"content_hash": "13daabd1e21393a828a70d96ff0f6d3e",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 83,
"avg_line_length": 41.386861313868614,
"alnum_prop": 0.6513227513227513,
"repo_name": "google/wasserstein-dist",
"id": "9d65a2162d8d2423d039e05632999685783c6a9c",
"size": "6246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wasserstein.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18961"
}
],
"symlink_target": ""
} |
from flask import Blueprint, request
from flask import current_app as app
import models
import flask
# Blueprint exposing the in-browser editor; registered by the host Flask app.
editable = Blueprint("editable", __name__, template_folder="templates", static_folder="static")
@editable.route("/", methods=["GET", "POST"])
@editable.route("/<path:url>", methods=["GET", "POST"])
def editor(url = ""):
    """GET: proxy the wrapped site with the editor flag set.

    POST: persist edited text/images through the host app's editable hooks.
    """
    if request.method == "GET":
        # Mark this request so downstream views render in editor mode.
        flask.g.current_view_is_editor = True
        internal_client = app.test_client()
        # Re-issue the request against the app itself, forwarding headers.
        return internal_client.get("/" + url, headers=list(request.headers))
    if request.method == "POST":
        payload = request.json
        hooks = app.extensions["editable"]
        for name, text in payload["text"].items():
            hooks["set_text"](name, text)
        for name, image in payload["images"].items():
            hooks["set_image"](name, image["file"], image["description"])
        for name, image in payload["bgimages"].items():
            hooks["set_bgimage"](name, image)
        return ("", 204)
| {
"content_hash": "a60b3823910b6cf8f9325b143ebc7593",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 95,
"avg_line_length": 42,
"alnum_prop": 0.6262939958592133,
"repo_name": "SmileyJames/flask-editable",
"id": "62114e75973188ffb6545d226fe516b8dbab73b9",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_editable/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2566"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "JavaScript",
"bytes": "5342"
},
{
"name": "Python",
"bytes": "9506"
}
],
"symlink_target": ""
} |
__author__ = 'hydezhang'
from utils.db_base import *
from collections import OrderedDict
def initialise_tenants_mapping():
    """Create the 'tenants' table used to track migrated tenants.

    No-op when the table already exists.
    """
    table_name = "tenants"
    if check_table_exist(table_name):
        return
    columns = '''id INT NOT NULL AUTO_INCREMENT,
                  project_name VARCHAR(32) NOT NULL,
                  src_uuid VARCHAR(128) NOT NULL,
                  src_cloud VARCHAR(128) NOT NULL,
                  new_project_name VARCHAR(32) NOT NULL,
                  dst_uuid VARCHAR(128) NOT NULL,
                  dst_cloud VARCHAR(128) NOT NULL,
                  images_migrated INT NOT NULL,
                  quota_updated INT NOT NULL,
                  state VARCHAR(128) NOT NULL,
                  PRIMARY KEY(id, src_uuid, dst_uuid)
               '''
    create_table(table_name, columns, True)
def record_tenant_migrated(tenant_details):
    """Insert (or update) migration records for the given tenants.

    Rows already present — matched on (src_uuid, src_cloud, dst_cloud) —
    are updated in place; the rest are inserted in one batch.

    :param tenant_details: iterable of dicts of migrated-tenant data
    """
    table_name = "tenants"
    rows_to_insert = []
    for details in tenant_details:
        match_key = {'src_uuid': details["src_uuid"],
                     'src_cloud': details["src_cloud"],
                     'dst_cloud': details["dst_cloud"]}
        if check_record_exist(table_name, match_key):
            # do a update instead
            update_migration_record(**details)
        else:
            rows_to_insert.append(details)
    insert_record(table_name, rows_to_insert, True)
def get_migrated_tenant(values):
    """Return the migration record for one tenant, or None.

    :param values: [project_name, src_cloud, dst_cloud] used as the filter
    :return: dict of migration details, or None when zero or multiple
        matching rows are found
    """
    table_name = "tenants"
    filters = {"project_name": values[0],
               "src_cloud": values[1],
               "dst_cloud": values[2]}
    rows = read_record(table_name, ["*"], filters, True)
    if not rows:
        LOG.info("no migration record found for tenant '{0}' in cloud '{1}'"
                 .format(values[0], values[1]))
        return None
    if len(rows) > 1:
        #TODO: Move multiple records check from all db_handler outside since the
        #TODO: validity of multiple matching record depends on application logic
        LOG.info("multiple migration record found for tenant '{0}' in cloud '{1}'"
                 .format(values[0], values[1]))
        return None
    # Exactly one row: column 0 is the auto-increment id, then the fields
    # in table order.
    field_names = ('project_name', 'src_uuid', 'src_cloud',
                   'new_project_name', 'dst_uuid', 'dst_cloud',
                   'images_migrated', 'quota_updated', 'state')
    return dict(zip(field_names, rows[0][1:]))
def update_migration_record(**tenant_details):
    """Update an existing tenant migration row.

    The row is matched on (src_uuid, src_cloud, dst_cloud), in that order.

    :param tenant_details: column -> value mapping used for the update
    """
    match_on = OrderedDict([('src_uuid', tenant_details["src_uuid"]),
                            ('src_cloud', tenant_details["src_cloud"]),
                            ('dst_cloud', tenant_details["dst_cloud"])])
    update_table("tenants", tenant_details, match_on, True)
def delete_migration_record(values):
    """function to delete a tenant migration record in database

    :param values: relevant data of tenant migration record
    which is used to filter data
    (positional: [project_name, src_uuid, src_cloud, dst_cloud])
    """
    table_name = "tenants"
    # Filter columns are positional in *values*; order matters.
    record_filter = {'project_name': values[0],
                     'src_uuid': values[1],
                     'src_cloud': values[2],
                     'dst_cloud': values[3]}
delete_record(table_name, record_filter) | {
"content_hash": "11c32f20630e027f9e2bbda65dd8a8ab",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 82,
"avg_line_length": 34.247933884297524,
"alnum_prop": 0.5644305019305019,
"repo_name": "Phoenix1708/OpenAcademy_OpenStack_Flyway",
"id": "f1dd78da22e23b7bd2770a900cf7f63c3cdc3785",
"size": "4144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flyway/utils/db_handlers/tenants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "388880"
},
{
"name": "JavaScript",
"bytes": "348422"
},
{
"name": "Python",
"bytes": "281721"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import os
import time
import contextlib
import selenium.common
from selenium import webdriver
@contextlib.contextmanager
def new_auto(*args, **kwargs):
    """Context manager yielding a fresh browser.

    Fix: quit() now runs in a finally block, so the browser is closed
    even when the with-body raises.
    """
    b = new(*args, **kwargs)
    try:
        yield b
    finally:
        b.quit()
def new(url=None, size=None):
    """Create a Chrome/Chromium webdriver.

    :param url: optional URL to load immediately after creation
    :param size: optional (width, height) tuple for the window
    :return: the selenium webdriver instance

    Fix: the Chromium binary path was hard-coded twice; the second use
    now references PATH_CHROMIUM.
    """
    PATH_CHROMIUM = "/Applications/Chromium.app/Contents/MacOS/Chromium"
    # Prefer Chromium over Chrome
    if os.path.exists(PATH_CHROMIUM):
        options = webdriver.ChromeOptions()
        options.binary_location = PATH_CHROMIUM
        browser = webdriver.Chrome(chrome_options=options)
    else:
        browser = webdriver.Chrome()
    if size:
        assert isinstance(size, tuple)
        assert len(size) == 2
        browser.set_window_size(*size)
    # NOTE(review): a disabled Linux-only workaround used to live here —
    # after creation the focus sits in the address bar, which breaks some
    # <input> interactions; it tabbed twice (pykeyboard) to move focus into
    # the page. Re-enable if that problem resurfaces.
    if url:
        browser.get(url)
    return browser
def load(b, url, force=False, retries=3, retry_timeout=60):
    """Navigate *b* to *url*, retrying on selenium timeouts.

    Does nothing when the browser is already at *url* (unless *force*).
    Sleeps *retry_timeout* seconds between attempts; the last failure
    is re-raised.
    """
    assert b, "No browser"
    if (not force) and (b.current_url == url):
        return
    last_attempt = retries - 1
    for attempt in range(retries):
        try:
            return b.get(url)
        except selenium.common.exceptions.TimeoutException:
            if attempt == last_attempt:
                raise
            time.sleep(retry_timeout)
| {
"content_hash": "6b2b2a97fd49eb7bac6661be5c988010",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 86,
"avg_line_length": 27.16923076923077,
"alnum_prop": 0.6132502831257078,
"repo_name": "alobbs/webest",
"id": "31b09be79950b2fbb53faa74c7d38bc2f0f6955e",
"size": "1766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webest/browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9801"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
"""Support for Wink lights."""
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP, SUPPORT_COLOR, Light)
from homeassistant.components.wink import DOMAIN, WinkDevice
from homeassistant.util import color as color_util
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
DEPENDENCIES = ['wink']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink lights.

    Registers both individual bulbs and light groups, skipping devices
    whose object_id+name has already been added.
    """
    import pywink

    def _add_new(devices):
        # Shared dedup-and-add logic (was duplicated for bulbs and groups).
        for device in devices:
            _id = device.object_id() + device.name()
            if _id not in hass.data[DOMAIN]['unique_ids']:
                add_entities([WinkLight(device, hass)])

    _add_new(pywink.get_light_bulbs())
    _add_new(pywink.get_light_groups())
class WinkLight(WinkDevice, Light):
    """Representation of a Wink light."""

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.data[DOMAIN]['entities']['light'].append(self)

    @property
    def is_on(self):
        """Return true if light is on."""
        return self.wink.state()

    @property
    def brightness(self):
        """Return the brightness of the light (0-255 scale)."""
        level = self.wink.brightness()
        if level is None:
            return None
        return int(level * 255)

    @property
    def hs_color(self):
        """Define current bulb color."""
        if self.wink.supports_xy_color():
            return color_util.color_xy_to_hs(*self.wink.color_xy())
        if self.wink.supports_hue_saturation():
            hue_frac = self.wink.color_hue()
            sat_frac = self.wink.color_saturation()
            if hue_frac is not None and sat_frac is not None:
                # Wink reports fractions; HA wants degrees / percent.
                return hue_frac * 360, sat_frac * 100
        return None

    @property
    def color_temp(self):
        """Define current bulb color in mireds (None if unsupported)."""
        if not self.wink.supports_temperature():
            return None
        kelvin = self.wink.color_temperature_kelvin()
        return color_util.color_temperature_kelvin_to_mired(kelvin)

    @property
    def supported_features(self):
        """Flag supported features."""
        features = SUPPORT_BRIGHTNESS
        if self.wink.supports_temperature():
            features |= SUPPORT_COLOR_TEMP
        # xy takes precedence; hue/saturation only checked when xy is absent.
        if self.wink.supports_xy_color() or self.wink.supports_hue_saturation():
            features |= SUPPORT_COLOR
        return features

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        state_kwargs = {}
        hs_color = kwargs.get(ATTR_HS_COLOR)
        if hs_color:
            if self.wink.supports_xy_color():
                state_kwargs['color_xy'] = color_util.color_hs_to_xy(*hs_color)
            if self.wink.supports_hue_saturation():
                # Convert HA degrees/percent back to Wink fractions.
                state_kwargs['color_hue_saturation'] = (
                    hs_color[0] / 360, hs_color[1] / 100)
        color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
        if color_temp_mired:
            state_kwargs['color_kelvin'] = mired_to_kelvin(color_temp_mired)
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        if brightness:
            state_kwargs['brightness'] = brightness / 255.0
        self.wink.set_state(True, **state_kwargs)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self.wink.set_state(False)
| {
"content_hash": "bfe7415744638c0e47aa94718689c1c9",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 76,
"avg_line_length": 34.301886792452834,
"alnum_prop": 0.6105610561056105,
"repo_name": "nugget/home-assistant",
"id": "14a983154f82f5ee3273aec10efaa259ee74e521",
"size": "3636",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wink/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
} |
from lib.aws import AmazonWebServices
from .common import * # NOQA
# NOTE(review): appears unused — validate_k8s_version always receives an
# explicit version; confirm before removing.
k8s_version = "v1.10.1-rancher1"
# Base RKE config shared (and mutated in place) by every test below.
rke_config = {"authentication": {"type": "authnConfig", "strategy": "x509"},
              "ignoreDockerVersion": False,
              "type": "rancherKubernetesEngineConfig"}
# Tear the cluster down after each test unless explicitly disabled.
RANCHER_CLEANUP_CLUSTER = os.environ.get('RANCHER_CLEANUP_CLUSTER', "True")
NETWORK_PLUGIN = os.environ.get('NETWORK_PLUGIN', "canal")
# One test per supported k8s release; each provisions, validates and
# (optionally) tears down a full custom cluster on AWS.
def test_rke_custom_k8s_1_8_10():
    validate_k8s_version("v1.8.10-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_8_11():
    validate_k8s_version("v1.8.11-rancher1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_9_5():
    validate_k8s_version("v1.9.5-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_9_7():
    validate_k8s_version("v1.9.7-rancher1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_10_0():
    validate_k8s_version("v1.10.0-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_10_1():
    validate_k8s_version("v1.10.1-rancher1", plugin=NETWORK_PLUGIN)
def validate_k8s_version(k8s_version, plugin="canal"):
    """Provision a custom RKE cluster for *k8s_version* and validate it.

    Creates 8 AWS nodes (2 controlplane, 3 etcd, 3 worker), registers each
    with the new cluster, validates it, and tears everything down when
    RANCHER_CLEANUP_CLUSTER is "True".

    :param k8s_version: Rancher-packaged k8s version string
    :param plugin: CNI network plugin name
    """
    # NOTE: mutates the shared module-level rke_config in place.
    rke_config["kubernetesVersion"] = k8s_version
    rke_config["network"] = {"type": "networkConfig", "plugin": plugin}
    aws_nodes = \
        AmazonWebServices().create_multiple_nodes(
            8, random_test_name("testcustom"))
    node_roles = [["controlplane"], ["controlplane"],
                  ["etcd"], ["etcd"], ["etcd"],
                  ["worker"], ["worker"], ["worker"]]
    client = get_admin_client()
    cluster = client.create_cluster(name=random_name(),
                                    driver="rancherKubernetesEngine",
                                    rancherKubernetesEngineConfig=rke_config)
    assert cluster.state == "active"
    # Pair each node with its role (zip replaces the manual counter).
    for aws_node, roles in zip(aws_nodes, node_roles):
        docker_run_cmd = \
            get_custom_host_registration_cmd(client, cluster, roles, aws_node)
        aws_node.execute_command(docker_run_cmd)
    cluster = validate_cluster(client, cluster)
    if RANCHER_CLEANUP_CLUSTER == "True":
        delete_cluster(client, cluster)
        delete_node(aws_nodes)
def delete_node(aws_nodes):
    """Terminate each of the given AWS nodes."""
    for aws_node in aws_nodes:
        AmazonWebServices().delete_node(aws_node)
| {
"content_hash": "8f96451741efb92927775923920f4f12",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.6234729493891797,
"repo_name": "sabiodelhielo/rancher-validation",
"id": "dfa46003999311ac99d4a18c5149dc6f7b512bf7",
"size": "2292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/v3_api/test_k8s_version_networkmodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20577"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid import core
paddle.enable_static()  # OpTest exercises the static graph
np.random.seed(10)  # deterministic random inputs across runs
# Situation 1: repeat_times is a list (without tensor)
class TestTileOpRank1(OpTest):
    """Base NPU test for the ``tile`` op: repeat_times given as a plain list.

    Subclasses override init_data() to vary the input shape and repeats.
    """
    def setUp(self):
        # Build inputs/attrs and the numpy reference output for OpTest.
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.init_data()
        self.inputs = {'X': np.random.random(self.ori_shape).astype("float32")}
        self.attrs = {'repeat_times': self.repeat_times}
        output = np.tile(self.inputs['X'], self.repeat_times)
        self.outputs = {'Out': output}
    def set_npu(self):
        # Flag consumed by the OpTest framework to select the NPU path.
        self.__class__.use_npu = True
    def init_data(self):
        # Default case: 1-D input repeated twice.
        self.ori_shape = [100]
        self.repeat_times = [2]
    def test_check_output(self):
        self.check_output_with_place(self.place)
    def test_check_grad(self):
        # Gradient check intentionally skipped on NPU.
        pass
# with dimension expanding
class TestTileOpRank2Expanding(TestTileOpRank1):
    """1-D input tiled with a 2-D repeat pattern (rank expansion)."""
    def init_data(self):
        self.ori_shape = [120]
        self.repeat_times = [2, 2]
class TestTileOpRank2(TestTileOpRank1):
    """2-D input with both axes repeated."""
    def init_data(self):
        self.ori_shape = [12, 14]
        self.repeat_times = [2, 3]
class TestTileOpRank3_Corner(TestTileOpRank1):
    """3-D input with all-ones repeats (identity tile)."""
    def init_data(self):
        self.ori_shape = (2, 10, 5)
        self.repeat_times = (1, 1, 1)
class TestTileOpRank3_Corner2(TestTileOpRank1):
    """3-D input with a shorter (2-element) repeat pattern."""
    def init_data(self):
        self.ori_shape = (2, 10, 5)
        self.repeat_times = (2, 2)
class TestTileOpRank3(TestTileOpRank1):
    """General 3-D case with mixed repeat factors."""
    def init_data(self):
        self.ori_shape = (2, 4, 15)
        self.repeat_times = (2, 1, 4)
class TestTileOpRank4(TestTileOpRank1):
    """General 4-D case with mixed repeat factors."""
    def init_data(self):
        self.ori_shape = (2, 4, 5, 7)
        self.repeat_times = (3, 2, 1, 2)
# Situation 2: repeat_times is a list (with tensor)
class TestTileOpRank1_tensor_attr(OpTest):
    """NPU tile test: repeat_times entries supplied as 1-element tensors."""
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.init_data()
        # One ("xN", int32 tensor) pair per repeat factor.
        repeat_times_tensor = [
            ("x" + str(index), np.ones((1)).astype('int32') * ele)
            for index, ele in enumerate(self.repeat_times)
        ]
        self.inputs = {
            'X': np.random.random(self.ori_shape).astype("float32"),
            'repeat_times_tensor': repeat_times_tensor,
        }
        self.attrs = {"repeat_times": self.infer_repeat_times}
        self.outputs = {'Out': np.tile(self.inputs['X'], self.repeat_times)}
    def set_npu(self):
        self.__class__.use_npu = True
    def init_data(self):
        self.ori_shape = [100]
        self.repeat_times = [2]
        self.infer_repeat_times = [-1]
    def test_check_output(self):
        self.check_output_with_place(self.place)
    def test_check_grad(self):
        pass
class TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr):
    """2-D identity tile with partially inferred (-1) repeat attrs."""
    def init_data(self):
        self.ori_shape = [12, 14]
        self.repeat_times = [1, 1]
        self.infer_repeat_times = [1, -1]
class TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr):
    """2-D tile with the first repeat factor inferred from a tensor."""
    def init_data(self):
        self.ori_shape = [12, 14]
        self.repeat_times = [2, 3]
        self.infer_repeat_times = [-1, 3]
# Situation 3: repeat_times is a tensor
class TestTileOpRank1_tensor(OpTest):
    """NPU tile test: repeat_times supplied as one int32 RepeatTimes tensor."""
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.init_data()
        self.inputs = {
            'X': np.random.random(self.ori_shape).astype("float32"),
            'RepeatTimes': np.array(self.repeat_times).astype("int32"),
        }
        self.attrs = {}
        output = np.tile(self.inputs['X'], self.repeat_times)
        self.outputs = {'Out': output}
    def set_npu(self):
        self.__class__.use_npu = True
    def init_data(self):
        self.ori_shape = [100]
        self.repeat_times = [2]
    def test_check_output(self):
        self.check_output_with_place(self.place)
    def test_check_grad(self):
        # Gradient check intentionally skipped on NPU.
        pass
class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
    """2-D variant of the RepeatTimes-tensor case."""
    def init_data(self):
        self.ori_shape = [12, 14]
        self.repeat_times = [2, 3]
# Situation 4: input x is Integer
class TestTileOpInteger(OpTest):
    """NPU tile test with an int32 input tensor."""
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.inputs = {
            'X': np.random.randint(10, size=(4, 4, 5)).astype("int32")
        }
        self.attrs = {'repeat_times': [2, 1, 4]}
        output = np.tile(self.inputs['X'], (2, 1, 4))
        self.outputs = {'Out': output}
    def set_npu(self):
        self.__class__.use_npu = True
    def test_check_output(self):
        self.check_output_with_place(self.place)
# Situation 5: input x is Int64
class TestTileOpInt64_t(OpTest):
    """NPU tile test with an int64 input tensor."""
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.inputs = {
            'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
        }
        self.attrs = {'repeat_times': [2, 1, 4]}
        output = np.tile(self.inputs['X'], (2, 1, 4))
        self.outputs = {'Out': output}
    def set_npu(self):
        self.__class__.use_npu = True
    def test_check_output(self):
        self.check_output_with_place(self.place)
# Situation 6: input x is Bool
class TestTileOpBool(OpTest):
    """NPU tile test with a boolean input tensor."""
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "tile"
        self.inputs = {'X': np.random.randint(1, size=(2, 4, 5)).astype("bool")}
        self.attrs = {'repeat_times': [2, 1, 4]}
        output = np.tile(self.inputs['X'], (2, 1, 4))
        self.outputs = {'Out': output}
    def set_npu(self):
        self.__class__.use_npu = True
    def test_check_output(self):
        self.check_output_with_place(self.place)
# Test python API
class TestTileAPI(unittest.TestCase):
    """Dygraph-mode checks of the paddle.tile Python API on NPU."""
    def test_api(self):
        with fluid.dygraph.guard(paddle.NPUPlace(0)):
            x_np = np.random.random([12, 14]).astype("float32")
            x = paddle.to_tensor(x_np)
            # Repeat factors expressed three ways: python ints, a scalar
            # tensor mixed with an int, and a full tensor.
            two = paddle.to_tensor(np.array([2]).astype("int32"))
            times = paddle.to_tensor(np.array([2, 3]).astype("int32"))
            expected = np.tile(x_np, (2, 3))
            out_1 = paddle.tile(x, repeat_times=[2, 3])
            out_2 = paddle.tile(x, repeat_times=[two, 3])
            out_3 = paddle.tile(x, repeat_times=times)
            assert np.array_equal(out_1.numpy(), expected)
            assert np.array_equal(out_2.numpy(), expected)
            assert np.array_equal(out_3.numpy(), expected)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "c3dae821112098abce86fb190559d370",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 80,
"avg_line_length": 28.20242914979757,
"alnum_prop": 0.5829744473155326,
"repo_name": "luotao1/Paddle",
"id": "e8003f82aa950c47307c68a8644b6b4144cae71b",
"size": "7579",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/npu/test_tile_op_npu.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from .eccodes import *
from .eccodes import __version__
# NOTE(review): the high-level wrappers are only exported on Python >= 2.6 —
# presumably they rely on features absent from older interpreters; confirm.
if sys.version_info >= (2, 6):
    from .high_level.gribfile import GribFile
    from .high_level.gribmessage import GribMessage
    from .high_level.gribindex import GribIndex
    from .high_level.bufr import BufrFile, BufrMessage
| {
"content_hash": "fbbc759542d272df84da9774cb24a767",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 30.90909090909091,
"alnum_prop": 0.7441176470588236,
"repo_name": "0x1mason/GribApi.XP",
"id": "42006b9f08a12ba0f63528253e1717b6609fc5ea",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grib_api/python/eccodes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "138199"
},
{
"name": "Awk",
"bytes": "45504"
},
{
"name": "Batchfile",
"bytes": "7420"
},
{
"name": "C",
"bytes": "10047811"
},
{
"name": "C#",
"bytes": "54012"
},
{
"name": "C++",
"bytes": "242436"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "659136"
},
{
"name": "CSS",
"bytes": "9136"
},
{
"name": "DIGITAL Command Language",
"bytes": "38021"
},
{
"name": "Fortran",
"bytes": "277520"
},
{
"name": "HTML",
"bytes": "738350"
},
{
"name": "Lex",
"bytes": "8266"
},
{
"name": "M4",
"bytes": "67194"
},
{
"name": "Makefile",
"bytes": "1359117"
},
{
"name": "Module Management System",
"bytes": "3619"
},
{
"name": "Pascal",
"bytes": "70297"
},
{
"name": "Perl",
"bytes": "112466"
},
{
"name": "Python",
"bytes": "160689"
},
{
"name": "Roff",
"bytes": "281590"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1139495"
},
{
"name": "XS",
"bytes": "16634"
},
{
"name": "Yacc",
"bytes": "35569"
}
],
"symlink_target": ""
} |
"""
Created on Fri Apr 8 12:46:55 2016
@author: Manojkumar Parmar VirKrupa
@Github Repo :
Modified on :
Version :
Remarks:
"""
class Node(object):
    """A graph node wrapping a string name."""
    def __init__(self, name):
        # Coerce to str so numeric labels print uniformly.
        self.name = str(name)
    def getName(self):
        """Return this node's name."""
        return self.name
    def __str__(self):
        return self.getName()
class Edge(object):
    """A directed edge from a source node to a destination node."""
    def __init__(self, src, dest):
        self.src = src
        self.dest = dest
    def getSource(self):
        """Return the source endpoint."""
        return self.src
    def getDestination(self):
        """Return the destination endpoint."""
        return self.dest
    def __str__(self):
        return '->'.join((str(self.src), str(self.dest)))
class WeightedEdge(Edge):
    """A directed edge carrying a numeric weight (default 1.0)."""
    def __init__(self, src, dest, weight = 1.0):
        # Reuse the parent initializer instead of duplicating assignments.
        Edge.__init__(self, src, dest)
        self.weight = weight
    def getWeight(self):
        """Return the edge's weight."""
        return self.weight
    def __str__(self):
        return str(self.src) + '->(' + str(self.weight) + ')'\
            + str(self.dest)
class Digraph(object):
    """A directed graph: a node set plus an adjacency mapping."""
    def __init__(self):
        self.nodes = set([])
        self.edges = {}
    def addNode(self, node):
        """Add *node*; raise ValueError if it is already present."""
        if node not in self.nodes:
            self.nodes.add(node)
            self.edges[node] = []
        else:
            raise ValueError('Duplicate node')
    def addEdge(self, edge):
        """Record *edge*; both endpoints must already be in the graph."""
        src = edge.getSource()
        dest = edge.getDestination()
        if src not in self.nodes or dest not in self.nodes:
            raise ValueError('Node not in graph')
        self.edges[src].append(dest)
    def childrenOf(self, node):
        """Return the list of nodes directly reachable from *node*."""
        return self.edges[node]
    def hasNode(self, node):
        """Return True if *node* belongs to the graph."""
        return node in self.nodes
    def __str__(self):
        lines = []
        for src in self.edges:
            for dest in self.edges[src]:
                lines.append(str(src) + '->' + str(dest))
        return '\n'.join(lines)
class Graph(Digraph):
    """An undirected graph: each added edge is mirrored in both directions."""
    def addEdge(self, edge):
        """Insert *edge* and its reverse into the underlying digraph."""
        Digraph.addEdge(self, edge)
        mirrored = Edge(edge.getDestination(), edge.getSource())
        Digraph.addEdge(self, mirrored)
# Build the graph of permutations of "ABC": one node per ordering.
nodes = []
nodes.append(Node("ABC")) # nodes[0]
nodes.append(Node("ACB")) # nodes[1]
nodes.append(Node("BAC")) # nodes[2]
nodes.append(Node("BCA")) # nodes[3]
nodes.append(Node("CAB")) # nodes[4]
nodes.append(Node("CBA")) # nodes[5]
g = Graph()
for n in nodes:
    g.addNode(n)
# Undirected edges connect permutations that differ by swapping one pair of
# adjacent letters. Graph.addEdge inserts both directions, so each pair is
# added only once; "repeat" notes flag pairs already covered above.
# nodes[0] ABC <-> (ACB, BAC) |keeping (A, C) constant |
# No repeat
g.addEdge(Edge(nodes[0],nodes[1]))
g.addEdge(Edge(nodes[0],nodes[2]))
# nodes[1] ACB <-> (ABC, CAB) |keeping (A, B) constant |
# ACB-ABC repeat
g.addEdge(Edge(nodes[1],nodes[4]))
# nodes[2] BAC <-> ( BCA, ABC) |keeping (B, C) constant |
# BAC-ABC repeat
g.addEdge(Edge(nodes[2],nodes[3]))
# nodes[3] BCA <-> (CBA, BAC) |keeping (A, B) constant |
# BCA-BAC repeat
g.addEdge(Edge(nodes[3],nodes[5]))
# nodes[4] CAB <-> (ACB, CBA) |keeping (B, C) constant |
# CAB-BAC, repeat
g.addEdge(Edge(nodes[4],nodes[5]))
# nodes[5] CBA <-> (BCA, CAB) |keeping (A, C) constant |
# CBA-BCA, CBA-CAB repeat
print g | {
"content_hash": "ea159f26ec71c54a41305d70010356e7",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 62,
"avg_line_length": 25.2,
"alnum_prop": 0.5710835058661146,
"repo_name": "parmarmanojkumar/MITx_Python",
"id": "c868bf2b17455836b5419a10fc57297beaf3e219",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "6002x/week5/L9P2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "128650"
}
],
"symlink_target": ""
} |
"""pred.py
Usage: pred.py <model_dir> [--evaluation=<DATA>] [--debug]
Options:
--evaluation=<DATA> [default: kbp_eval]
"""
import os
from docopt import docopt
import numpy as np
from utils import np_softmax
from pprint import pprint
from configs.config import Config
from data.dataset import Dataset, Split
from data.adaptors import *
from data.typecheck import TypeCheckAdaptor
from models import get_model
import cPickle as pkl
if __name__ == '__main__':
mydir = os.path.dirname(os.path.abspath(__file__))
args = docopt(__doc__)
root = os.path.abspath(args['<model_dir>'])
config = Config.load(os.path.join(root, 'config.json'))
with open(os.path.join(root, 'featurizer.pkl')) as f:
featurizer = pkl.load(f)
typechecker = TypeCheckAdaptor(os.path.join(mydir, 'data', 'raw', 'typecheck.csv'), featurizer.vocab)
model = get_model(config, featurizer.vocab, typechecker)
model.load_weights(os.path.join(root, 'best_weights'))
dev_generator = {
'kbp_eval': KBPEvaluationDataAdaptor().to_examples(os.path.join(mydir, 'data', 'raw', 'evaluation.tsv')),
'supervised': SupervisedDataAdaptor().to_examples(os.path.join(mydir, 'data', 'raw', 'supervision.csv')),
'kbp_sample': KBPDataAdaptor().to_examples(os.path.join(mydir, 'data', 'raw', 'test.sample.tsv')),
}[args['--evaluation']]
from train import Trainer
dev_split = Split(dev_generator, featurizer, add=False)
scoring_labels = [i for i in xrange(len(featurizer.vocab['rel'])) if i != featurizer.vocab['rel']['no_relation']]
trainer = Trainer('.', model, typechecker, scoring_labels)
best_scores = trainer.run_epoch(dev_split, train=False, return_pred=True)
todir = os.path.join(root, 'preds')
if not os.path.isdir(todir):
os.makedirs(todir)
print 'predictions output at', todir
from plot_utils import plot_confusion_matrix, plot_histogram, get_sorted_labels, parse_gabor_report, parse_sklearn_report, combine_report, retrieve_wrong_examples
import json
from sklearn.metrics import classification_report
wrongs = retrieve_wrong_examples(dev_split.examples,
best_scores['ids'],
best_scores['preds'],
best_scores['targs'],
featurizer.vocab
)
with open(os.path.join(todir, 'wrongs.json'), 'wb') as f:
json.dump(wrongs, f, indent=2, sort_keys=True)
sklearn_report = classification_report(
best_scores['targs'], best_scores['preds'],
target_names=featurizer.vocab['rel'].index2word)
with open(os.path.join(mydir, 'data', 'raw', 'gabor_report.txt')) as f:
gabor = f.read()
gabor_report = parse_gabor_report(gabor)
sklearn_report = parse_sklearn_report(str(sklearn_report))
combined_report = combine_report(sklearn_report, gabor_report, featurizer.vocab['rel'].counts)
with open(os.path.join(todir, 'classification_report.txt'), 'wb') as f:
f.write(combined_report)
order, labels, counts = get_sorted_labels(best_scores['targs'], featurizer.vocab)
fig = plot_confusion_matrix(best_scores['targs'], best_scores['preds'], order, labels)
fig.savefig(os.path.join(todir, 'confusion_matrix.png'))
fig = plot_histogram(labels, counts)
fig.savefig(os.path.join(todir, 'relation_histogram.png'))
with open(os.path.join(todir, 'best_scores.json'), 'wb') as f:
del best_scores['preds']
del best_scores['targs']
del best_scores['ids']
json.dump(best_scores, f, sort_keys=True)
print 'best scores'
pprint(best_scores)
print 'best scores'
pprint(best_scores)
| {
"content_hash": "9813d97a6a0edbb82946f83ce032dfe6",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 166,
"avg_line_length": 40.18279569892473,
"alnum_prop": 0.6507894032646507,
"repo_name": "vzhong/sent2rel",
"id": "e29b85d48a6b23cbb948d3d400fba77bdb4dd3f3",
"size": "3801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pred.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "545018"
},
{
"name": "Python",
"bytes": "74526"
},
{
"name": "Shell",
"bytes": "634"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.