Dataset schema (one row per source file):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 values
- lang: string, 1 value
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
hexsha: 6a952a0c6e9f23ef354e8ded859406787f17882c | size: 3,071 | ext: py | lang: Python
max_stars: importgarmentdxf.py @ r-tezuka/blender2.8_scale_and_location_changer_sample (head 0c1b2d31adb0174c7634783ac91fb07650ba7b81) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_issues: importgarmentdxf.py @ r-tezuka/blender2.8_scale_and_location_changer_sample (head 0c1b2d31adb0174c7634783ac91fb07650ba7b81) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_forks: importgarmentdxf.py @ r-tezuka/blender2.8_scale_and_location_changer_sample (head 0c1b2d31adb0174c7634783ac91fb07650ba7b81) | licenses: ["Apache-2.0"] | count: null | events: null .. null
content:
import bpy
import math
import sys
############# lib ########################
def degToRad(deg):
return 2 * math.pi / 360 * deg
def getObjHeight(obj):
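    # NOTE: this helper (and getObjThickness below) reads
    # bpy.context.active_object, so the caller must make obj the active
    # object, not merely selected, for the returned dimension to be obj's.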
obj.select_set(True)
x, y, z = bpy.context.active_object.dimensions
return z
def getObjThickness(obj):
obj.select_set(True)
x, y, z = bpy.context.active_object.dimensions
return y
def getEdgeVerts(obj):
if obj.type != "MESH":
print('Error: getEdgeVerts failed', file=sys.stderr)
sys.exit(1)
else:
return obj.data.vertices
def getMinMaxLocation():
obj = bpy.data.objects["Collision"]
verts = getEdgeVerts(obj)
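    # The extrema start at 0, which implicitly assumes the Collision mesh
    # spans the origin on all three axes.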
xMax = 0
xMin = 0
yMax = 0
yMin = 0
zMax = 0
zMin = 0
for vert in verts:
location = vert.co
if location.x > xMax:
xMax = location.x
if location.x < xMin:
xMin = location.x
if location.y > yMax:
yMax = location.y
if location.y < yMin:
yMin = location.y
if location.z > zMax:
zMax = location.z
if location.z < zMin:
zMin = location.z
return [[xMin, yMin, zMin],[xMax, yMax, zMax]]
def move(obj, minMaxLocation):
obj.select_set(True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
obj.select_set(False)
obj.scale *= 0.001
if 'P0005' in obj.name: #BACK
obj.rotation_euler.y = degToRad(-90)
obj.rotation_euler.z = degToRad(-90)
obj.location = (0, minMaxLocation[1][1], minMaxLocation[1][2])
elif 'P0018' in obj.name: #FRONT
obj.rotation_euler.y = degToRad(-90)
obj.rotation_euler.z = degToRad(-90)
obj.location = (0, minMaxLocation[0][1], minMaxLocation[1][2])
elif 'P0031' in obj.name: #SLEEVE
obj.rotation_euler.y = degToRad(-120)
obj.location = (minMaxLocation[1][0]/2, 0, minMaxLocation[1][2])
obj.select_set(True)
bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked":True, "mode":'TRANSLATION'}, TRANSFORM_OT_translate={"value":(-minMaxLocation[1][0], 0, 0)})
obj.select_set(False)
bpy.ops.transform.rotate(value=degToRad(-60), orient_axis='Y')
elif 'P0044' in obj.name: #COLLAR
obj.rotation_euler.x = degToRad(-90)
obj.location = (0, minMaxLocation[1][1], minMaxLocation[1][2])
def propSet(obj):
obj.data.dimensions = '2D'
obj.data.splines[0].use_cyclic_u = True
obj.data.twist_mode = 'MINIMUM'
obj.data.fill_mode = 'BOTH'
def execPreProcess(objects):
for obj in objects:
if obj.type == "CURVE":
minMaxLocation = getMinMaxLocation()
move(obj, minMaxLocation)
propSet(obj)
elif obj.type == "MESH":
if obj.name != 'Collision':
bpy.data.objects.remove(obj)
continue
else:
bpy.data.objects.remove(obj)
################ main ######################
objects = bpy.context.visible_objects
execPreProcess(objects)
avg_line_length: 29.815534 | max_line_length: 160 | alphanum_fraction: 0.57929
hexsha: d279e738ad2a40fd09f0d65e5a8cb2251f5d5a88 | size: 17,375 | ext: py | lang: Python
max_stars: cogs/roles.py @ 8ka1alu/vs-cog (head d5729948c86ef2bd5f7a227e3514db055ccb652d) | licenses: ["MIT"] | count: null | events: null .. null
max_issues: cogs/roles.py @ 8ka1alu/vs-cog (head d5729948c86ef2bd5f7a227e3514db055ccb652d) | licenses: ["MIT"] | count: 2 | events: 2021-06-08T21:57:07.000Z .. 2022-01-13T02:59:38.000Z
max_forks: cogs/roles.py @ 8ka1alu/vs-cog (head d5729948c86ef2bd5f7a227e3514db055ccb652d) | licenses: ["MIT"] | count: null | events: null .. null
content:
from discord.ext import commands  # import the Bot Commands Framework
import discord
import asyncio
import random
import datetime
great_owner_id = 459936557432963103
# Define the class to be used as a cog.
class roles(commands.Cog):
    # Constructor of the roles class: receives the Bot and stores it as an instance variable.
def __init__(self, bot):
self.bot = bot
    # The main role command.
@commands.group(aliases=['rl'])
@commands.has_permissions(manage_guild=True)
async def role(self, ctx):
"""役職関連(管理者用)"""
# サブコマンドが指定されていない場合、メッセージを送信する。
if ctx.invoked_subcommand is None:
await ctx.send('このコマンドにはサブコマンドが必要です。')
# roleコマンドのサブコマンド
# 指定したユーザーに指定した役職を付与する。
@role.command(aliases=['ad'])
async def add(self, ctx, member: discord.Member, role: discord.Role):
"""付与(管理者用)"""
await member.add_roles(role)
await ctx.send('付与しました。')
    # Subcommand of the role command:
    # removes the specified role from the specified user.
@role.command(aliases=['rm'])
async def remove(self, ctx, member: discord.Member, role: discord.Role):
"""剥奪(管理者用)"""
await member.remove_roles(role)
await ctx.send('剥奪しました。')
    # Subcommand of the role command:
    # deletes the specified role.
@role.command(aliases=['dl'])
    async def delete(self, ctx, role: discord.Role = None):
        """Delete a role (admin only)"""
        if role is None:
            await ctx.send('役職名を指定して下さい。')
            return
#role = discord.utils.get(ctx.guild.roles, name=role_name)
await role.delete()
await ctx.send('削除しました。')
    # Subcommand of the role command:
    # creates a role interactively.
@role.command(aliases=['cr'])
    async def create(self, ctx, what=None):
        """Create a role (admin only)"""
        if what is None:
            what = "new role"
rote = 0
        # system settings
hoist = False
mentionable = False
        # basic permissions
administrator = False
view_audit_log = False
manage_guild = False
manage_roles = False
manage_channels = False
kick_members = False
ban_members = False
create_instant_invite = False
change_nicknames = False
manage_nicknames = False
manage_emojis = False
manage_webhooks = False
read_messages = False
        # text permissions
send_messages = False
send_tts_messages = False
manage_messages = False
embed_links = False
attach_files = False
read_message_history = False
mention_everyone = False
external_emojis = False
add_reactions = False
        # voice permissions
connect = False
speak = False
mute_members = False
deafen_members = False
move_members = False
use_voice_activation = False
while rote < 2:
            if rote == 1:
                await msg.delete()
                if read_messages:
                    await msgf.delete()
                    await msgs.delete()
                rote = 0
            await asyncio.sleep(0.4)
roleedit = discord.Embed(title="権限設定",description=f"番号・記号を入力して下さい。")
roleedit.add_field(name=f"**オンラインメンバーとは別にロールメンバーを表示する({hoist})**",value='`a`')
roleedit.add_field(name=f"**このロールに対して@mentionを許可する({mentionable})**",value='`b`')
roleedit.add_field(name=f"**管理者({administrator})**",value='`1`')
roleedit.add_field(name=f"**監査ログを表示({view_audit_log})**",value='`2`')
roleedit.add_field(name=f"**サーバーの管理({manage_guild})**",value='`3`')
roleedit.add_field(name=f"**ロールの管理({manage_roles})**",value='`4`')
roleedit.add_field(name=f"**チャンネルの管理({manage_channels})**",value='`5`')
roleedit.add_field(name=f"**メンバーをKICK({kick_members})**",value='`6`')
roleedit.add_field(name=f"**メンバーをBAN({ban_members})**",value='`7`')
roleedit.add_field(name=f"**招待を作成({create_instant_invite})**",value='`8`')
roleedit.add_field(name=f"**ニックネームの変更({change_nicknames})**",value='`9`')
roleedit.add_field(name=f"**ニックネームの管理({manage_nicknames})**",value='`10`')
roleedit.add_field(name=f"**絵文字の管理({manage_emojis})**",value='`11`')
roleedit.add_field(name=f"**ウェブフックの管理({manage_webhooks})**",value='`12`')
roleedit.add_field(name=f"**テキストチャンネルの閲覧&ボイスチャンネルの表示({read_messages})**",value='`13`')
roleedit.add_field(name="----------",value='----------')
roleedit.add_field(name="**無付与・設定完了**",value='`0`')
msg = await ctx.send(embed=roleedit)
            if read_messages:
await asyncio.sleep(0.1)
roletxt = discord.Embed(title="テキストの権限",description=f"番号を入力して下さい。")
roletxt.add_field(name=f"**メッセージを送信({send_messages})**",value='`14`')
roletxt.add_field(name=f"**TTSメッセージを送信({send_tts_messages})**",value='`15`')
roletxt.add_field(name=f"**メッセージの管理({manage_messages})**",value='`16`')
roletxt.add_field(name=f"**埋め込みリンク({embed_links})**",value='`17`')
roletxt.add_field(name=f"**ファイルの添付({attach_files})**",value='`18`')
roletxt.add_field(name=f"**メッセージ履歴を読む({read_message_history})**",value='`19`')
roletxt.add_field(name=f"**@everyone,@here,すべてのロールにメンション({mention_everyone})**",value='`20`')
roletxt.add_field(name=f"**外部の絵文字の使用({external_emojis})**",value='`21`')
roletxt.add_field(name=f"**リアクションの追加({add_reactions})**",value='`22`')
roletxt.add_field(name="----------",value='----------')
roletxt.add_field(name="**無付与・設定完了**",value='`0`')
msgs = await ctx.channel.send(embed=roletxt)
await asyncio.sleep(0.1)
rolevoc = discord.Embed(title="音声の権限",description=f"番号を入力して下さい。")
rolevoc.add_field(name=f"**接続({connect})**",value='`23`')
rolevoc.add_field(name=f"**発言({speak})**",value='`24`')
rolevoc.add_field(name=f"**メンバーをミュート({mute_members})**",value='`25`')
rolevoc.add_field(name=f"**メンバーのスピーカーをミュート({deafen_members})**",value='`26`')
rolevoc.add_field(name=f"**メンバーを移動({move_members})**",value='`27`')
rolevoc.add_field(name=f"**音声検出を使用({use_voice_activation})**",value='`28`')
rolevoc.add_field(name="----------",value='----------')
rolevoc.add_field(name="**無付与・設定完了**",value='`0`')
msgf = await ctx.send(embed=rolevoc)
            def rotetime(m):
                # Accept only the menu choices, and only from the command author.
                valid = {"a", "b"} | {str(i) for i in range(29)}
                return m.content in valid and m.author == ctx.author
            try:
                reply = await self.bot.wait_for("message", check=rotetime, timeout=300.0)
            except asyncio.TimeoutError:
                await ctx.channel.send("設定を中止します。(type:time over)")
                return
else:
if reply.content == "0":
await msg.delete()
if read_messages == True:
await msgf.delete()
await msgs.delete()
rote = 2
elif reply.content == "a":
if hoist == False:
hoist = True
elif hoist == True:
hoist = False
rote = 1
elif reply.content == "b":
if mentionable == False:
mentionable = True
elif mentionable == True:
mentionable = False
rote = 1
elif reply.content == "1":
if administrator == False:
administrator = True
elif administrator == True:
administrator = False
rote = 1
elif reply.content == "2":
if view_audit_log == False:
view_audit_log = True
elif view_audit_log == True:
view_audit_log = False
rote = 1
elif reply.content == "3":
if manage_guild == False:
manage_guild = True
elif manage_guild == True:
manage_guild = False
rote = 1
elif reply.content == "4":
if manage_roles == False:
manage_roles = True
elif manage_roles == True:
manage_roles = False
rote = 1
elif reply.content == "5":
if manage_channels == False:
manage_channels = True
elif manage_channels == True:
manage_channels = False
rote = 1
elif reply.content == "6":
if kick_members == False:
kick_members = True
elif kick_members == True:
kick_members = False
rote = 1
elif reply.content == "7":
if ban_members == False:
ban_members = True
elif ban_members == True:
ban_members = False
rote = 1
elif reply.content == "8":
if create_instant_invite == False:
create_instant_invite = True
elif create_instant_invite == True:
create_instant_invite = False
rote = 1
elif reply.content == "9":
if change_nicknames == False:
change_nicknames = True
elif change_nicknames == True:
change_nicknames = False
rote = 1
elif reply.content == "10":
if manage_nicknames == False:
manage_nicknames = True
elif manage_nicknames == True:
manage_nicknames = False
rote = 1
elif reply.content == "11":
if manage_emojis == False:
manage_emojis = True
elif manage_emojis == True:
manage_emojis = False
rote = 1
elif reply.content == "12":
if manage_webhooks == False:
manage_webhooks = True
elif manage_webhooks == True:
manage_webhooks = False
rote = 1
elif reply.content == "13":
if read_messages == False:
read_messages = True
msgf = await ctx.send("○")
msgs = await ctx.send("○")
elif read_messages == True:
read_messages = False
send_messages = False
send_tts_messages = False
manage_messages = False
embed_links = False
attach_files = False
read_message_history = False
mention_everyone = False
external_emojis = False
add_reactions = False
connect = False
speak = False
mute_members = False
deafen_members = False
move_members = False
use_voice_activation = False
await msgf.delete()
await msgs.delete()
rote = 1
elif reply.content == "14":
if send_messages == False:
send_messages = True
elif send_messages == True:
send_messages = False
rote = 1
elif reply.content == "15":
if send_tts_messages == False:
send_tts_messages = True
elif send_tts_messages == True:
send_tts_messages = False
rote = 1
elif reply.content == "16":
if manage_messages == False:
manage_messages = True
elif manage_messages == True:
manage_messages = False
rote = 1
elif reply.content == "17":
if embed_links == False:
embed_links = True
elif embed_links == True:
embed_links = False
rote = 1
elif reply.content == "18":
if attach_files == False:
attach_files = True
elif attach_files == True:
attach_files = False
rote = 1
elif reply.content == "19":
if read_message_history == False:
read_message_history = True
elif read_message_history == True:
read_message_history = False
rote = 1
elif reply.content == "20":
if mention_everyone == False:
mention_everyone = True
elif mention_everyone == True:
mention_everyone = False
rote = 1
elif reply.content == "21":
if external_emojis == False:
external_emojis = True
elif external_emojis == True:
external_emojis = False
rote = 1
elif reply.content == "22":
if add_reactions == False:
add_reactions = True
elif add_reactions == True:
add_reactions = False
rote = 1
elif reply.content == "23":
if connect == False:
connect = True
elif connect == True:
connect = False
rote = 1
elif reply.content == "24":
if speak == False:
speak = True
elif speak == True:
speak = False
rote = 1
elif reply.content == "25":
if mute_members == False:
mute_members = True
elif mute_members == True:
mute_members = False
rote = 1
elif reply.content == "26":
if deafen_members == False:
deafen_members = True
elif deafen_members == True:
deafen_members = False
rote = 1
elif reply.content == "27":
if move_members == False:
move_members = True
elif move_members == True:
move_members = False
rote = 1
elif reply.content == "28":
if use_voice_activation == False:
use_voice_activation = True
elif use_voice_activation == True:
use_voice_activation = False
rote = 1
else:
await asyncio.sleep(303.0)
await reply.delete()
pre = discord.Permissions(administrator=administrator,view_audit_log=view_audit_log,manage_guild=manage_guild,manage_roles=manage_roles,manage_channels=manage_channels,kick_members=kick_members,ban_members=ban_members,create_instant_invite=create_instant_invite,change_nickname=change_nicknames,manage_nicknames=manage_nicknames,manage_emojis=manage_emojis,manage_webhooks=manage_webhooks,read_messages=read_messages,send_messages=send_messages,
send_tts_messages=send_tts_messages,manage_messages=manage_messages,embed_links=embed_links,attach_files=attach_files,read_message_history=read_message_history,mention_everyone=mention_everyone,external_emojis=external_emojis,add_reactions=add_reactions,
connect=connect,speak=speak,mute_members=mute_members,deafen_members=deafen_members,move_members=move_members,use_voice_activation=use_voice_activation)
guild = ctx.guild
set_name2 = f"{what}"
await guild.create_role(name=set_name2,hoist=hoist,mentionable=mentionable,permissions=pre)
        await ctx.send(f'作成しました。@{set_name2}')
# Function called when the bot loads this cog.
def setup(bot):
    bot.add_cog(roles(bot))  # Instantiate with the Bot and register it as a cog.
avg_line_length: 46.333333 | max_line_length: 453 | alphanum_fraction: 0.489784
hexsha: a9945448e950e74e839a5caabd3f985c7f5082aa | size: 26,972 | ext: py | lang: Python
max_stars: openstack-placement-1.0.0/placement/objects/allocation.py @ scottwedge/OpenStack-Stein (head 7077d1f602031dace92916f14e36b124f474de15) | licenses: ["Apache-2.0"] | count: null | events: null .. null
max_issues: openstack-placement-1.0.0/placement/objects/allocation.py @ scottwedge/OpenStack-Stein (head 7077d1f602031dace92916f14e36b124f474de15) | licenses: ["Apache-2.0"] | count: 5 | events: 2019-08-14T06:46:03.000Z .. 2021-12-13T20:01:25.000Z
max_forks: openstack-placement-1.0.0/placement/objects/allocation.py @ scottwedge/OpenStack-Stein (head 7077d1f602031dace92916f14e36b124f474de15) | licenses: ["Apache-2.0"] | count: 2 | events: 2020-03-15T01:24:15.000Z .. 2020-07-22T20:34:26.000Z
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_db import api as oslo_db_api
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import sql
from placement.db.sqlalchemy import models
from placement import db_api
from placement import exception
from placement.objects import consumer as consumer_obj
from placement.objects import project as project_obj
from placement.objects import resource_provider as rp_obj
from placement.objects import user as user_obj
from placement import resource_class_cache as rc_cache
_ALLOC_TBL = models.Allocation.__table__
_CONSUMER_TBL = models.Consumer.__table__
_INV_TBL = models.Inventory.__table__
_PROJECT_TBL = models.Project.__table__
_RP_TBL = models.ResourceProvider.__table__
_USER_TBL = models.User.__table__
LOG = logging.getLogger(__name__)
# The number of times to retry set_allocations if there has
# been a resource provider (not consumer) generation conflict.
RP_CONFLICT_RETRY_COUNT = 10
class Allocation(object):
def __init__(self, id=None, resource_provider=None, consumer=None,
resource_class=None, used=0, updated_at=None,
created_at=None):
self.id = id
self.resource_provider = resource_provider
self.resource_class = resource_class
self.consumer = consumer
self.used = used
self.updated_at = updated_at
self.created_at = created_at
@db_api.placement_context_manager.writer
def _delete_allocations_for_consumer(ctx, consumer_id):
"""Deletes any existing allocations that correspond to the allocations to
be written. This is wrapped in a transaction, so if the write subsequently
fails, the deletion will also be rolled back.
"""
del_sql = _ALLOC_TBL.delete().where(
_ALLOC_TBL.c.consumer_id == consumer_id)
ctx.session.execute(del_sql)
@db_api.placement_context_manager.writer
def _delete_allocations_by_ids(ctx, alloc_ids):
"""Deletes allocations having an internal id value in the set of supplied
IDs
"""
del_sql = _ALLOC_TBL.delete().where(_ALLOC_TBL.c.id.in_(alloc_ids))
ctx.session.execute(del_sql)
def _check_capacity_exceeded(ctx, allocs):
"""Checks to see if the supplied allocation records would result in any of
the inventories involved having their capacity exceeded.
Raises an InvalidAllocationCapacityExceeded exception if any inventory
would be exhausted by the allocation. Raises an
InvalidAllocationConstraintsViolated exception if any of the `step_size`,
`min_unit` or `max_unit` constraints in an inventory will be violated
by any one of the allocations.
If no inventories would be exceeded or violated by the allocations, the
function returns a list of `ResourceProvider` objects that contain the
generation at the time of the check.
:param ctx: `placement.context.RequestContext` that has an oslo_db
Session
:param allocs: List of `Allocation` objects to check
"""
# The SQL generated below looks like this:
# SELECT
# rp.id,
# rp.uuid,
# rp.generation,
# inv.resource_class_id,
# inv.total,
# inv.reserved,
# inv.allocation_ratio,
# allocs.used
# FROM resource_providers AS rp
    # JOIN inventories AS inv
    #  ON rp.id = inv.resource_provider_id
# LEFT JOIN (
# SELECT resource_provider_id, resource_class_id, SUM(used) AS used
# FROM allocations
# WHERE resource_class_id IN ($RESOURCE_CLASSES)
# AND resource_provider_id IN ($RESOURCE_PROVIDERS)
# GROUP BY resource_provider_id, resource_class_id
# ) AS allocs
# ON inv.resource_provider_id = allocs.resource_provider_id
# AND inv.resource_class_id = allocs.resource_class_id
# WHERE rp.id IN ($RESOURCE_PROVIDERS)
# AND inv.resource_class_id IN ($RESOURCE_CLASSES)
#
# We then take the results of the above and determine if any of the
# inventory will have its capacity exceeded.
rc_ids = set([rc_cache.RC_CACHE.id_from_string(a.resource_class)
for a in allocs])
provider_uuids = set([a.resource_provider.uuid for a in allocs])
provider_ids = set([a.resource_provider.id for a in allocs])
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id,
sql.func.sum(_ALLOC_TBL.c.used).label('used')])
usage = usage.where(
sa.and_(_ALLOC_TBL.c.resource_class_id.in_(rc_ids),
_ALLOC_TBL.c.resource_provider_id.in_(provider_ids)))
usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id)
usage = sa.alias(usage, name='usage')
inv_join = sql.join(
_RP_TBL, _INV_TBL,
sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
_INV_TBL.c.resource_class_id.in_(rc_ids)))
primary_join = sql.outerjoin(
inv_join, usage,
sql.and_(
_INV_TBL.c.resource_provider_id == usage.c.resource_provider_id,
_INV_TBL.c.resource_class_id == usage.c.resource_class_id)
)
cols_in_output = [
_RP_TBL.c.id.label('resource_provider_id'),
_RP_TBL.c.uuid,
_RP_TBL.c.generation,
_INV_TBL.c.resource_class_id,
_INV_TBL.c.total,
_INV_TBL.c.reserved,
_INV_TBL.c.allocation_ratio,
_INV_TBL.c.min_unit,
_INV_TBL.c.max_unit,
_INV_TBL.c.step_size,
usage.c.used,
]
sel = sa.select(cols_in_output).select_from(primary_join)
sel = sel.where(
sa.and_(_RP_TBL.c.id.in_(provider_ids),
_INV_TBL.c.resource_class_id.in_(rc_ids)))
records = ctx.session.execute(sel)
# Create a map keyed by (rp_uuid, res_class) for the records in the DB
usage_map = {}
provs_with_inv = set()
for record in records:
map_key = (record['uuid'], record['resource_class_id'])
if map_key in usage_map:
raise KeyError("%s already in usage_map, bad query" % str(map_key))
usage_map[map_key] = record
provs_with_inv.add(record["uuid"])
# Ensure that all providers have existing inventory
missing_provs = provider_uuids - provs_with_inv
if missing_provs:
class_str = ', '.join([rc_cache.RC_CACHE.string_from_id(rc_id)
for rc_id in rc_ids])
provider_str = ', '.join(missing_provs)
raise exception.InvalidInventory(
resource_class=class_str, resource_provider=provider_str)
res_providers = {}
rp_resource_class_sum = collections.defaultdict(
lambda: collections.defaultdict(int))
for alloc in allocs:
rc_id = rc_cache.RC_CACHE.id_from_string(alloc.resource_class)
rp_uuid = alloc.resource_provider.uuid
if rp_uuid not in res_providers:
res_providers[rp_uuid] = alloc.resource_provider
amount_needed = alloc.used
rp_resource_class_sum[rp_uuid][rc_id] += amount_needed
# No use checking usage if we're not asking for anything
if amount_needed == 0:
continue
key = (rp_uuid, rc_id)
try:
usage = usage_map[key]
except KeyError:
# The resource class at rc_id is not in the usage map.
raise exception.InvalidInventory(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
allocation_ratio = usage['allocation_ratio']
min_unit = usage['min_unit']
max_unit = usage['max_unit']
step_size = usage['step_size']
# check min_unit, max_unit, step_size
if (amount_needed < min_unit or amount_needed > max_unit or
amount_needed % step_size != 0):
LOG.warning(
"Allocation for %(rc)s on resource provider %(rp)s "
"violates min_unit, max_unit, or step_size. "
"Requested: %(requested)s, min_unit: %(min_unit)s, "
"max_unit: %(max_unit)s, step_size: %(step_size)s",
{'rc': alloc.resource_class,
'rp': rp_uuid,
'requested': amount_needed,
'min_unit': min_unit,
'max_unit': max_unit,
'step_size': step_size})
raise exception.InvalidAllocationConstraintsViolated(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
# usage["used"] can be returned as None
used = usage['used'] or 0
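        # Illustrative numbers: total=8, reserved=2, allocation_ratio=1.5
        # gives capacity = (8 - 2) * 1.5 = 9.0.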
capacity = (usage['total'] - usage['reserved']) * allocation_ratio
if (capacity < (used + amount_needed) or
capacity < (used + rp_resource_class_sum[rp_uuid][rc_id])):
LOG.warning(
"Over capacity for %(rc)s on resource provider %(rp)s. "
"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s",
{'rc': alloc.resource_class,
'rp': rp_uuid,
'needed': amount_needed,
'used': used,
'cap': capacity})
raise exception.InvalidAllocationCapacityExceeded(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
return res_providers
@db_api.placement_context_manager.reader
def _get_allocations_by_provider_id(ctx, rp_id):
allocs = sa.alias(_ALLOC_TBL, name="a")
consumers = sa.alias(_CONSUMER_TBL, name="c")
projects = sa.alias(_PROJECT_TBL, name="p")
users = sa.alias(_USER_TBL, name="u")
cols = [
allocs.c.id,
allocs.c.resource_class_id,
allocs.c.used,
allocs.c.updated_at,
allocs.c.created_at,
consumers.c.id.label("consumer_id"),
consumers.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumers.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
projects.c.id.label("project_id"),
projects.c.external_id.label("project_external_id"),
users.c.id.label("user_id"),
users.c.external_id.label("user_external_id"),
]
# TODO(jaypipes): change this join to be on ID not UUID
consumers_join = sa.join(
allocs, consumers, allocs.c.consumer_id == consumers.c.uuid)
projects_join = sa.join(
consumers_join, projects, consumers.c.project_id == projects.c.id)
users_join = sa.join(
projects_join, users, consumers.c.user_id == users.c.id)
sel = sa.select(cols).select_from(users_join)
sel = sel.where(allocs.c.resource_provider_id == rp_id)
return [dict(r) for r in ctx.session.execute(sel)]
@db_api.placement_context_manager.reader
def _get_allocations_by_consumer_uuid(ctx, consumer_uuid):
allocs = sa.alias(_ALLOC_TBL, name="a")
rp = sa.alias(_RP_TBL, name="rp")
consumer = sa.alias(_CONSUMER_TBL, name="c")
project = sa.alias(_PROJECT_TBL, name="p")
user = sa.alias(_USER_TBL, name="u")
cols = [
allocs.c.id,
allocs.c.resource_provider_id,
rp.c.name.label("resource_provider_name"),
rp.c.uuid.label("resource_provider_uuid"),
rp.c.generation.label("resource_provider_generation"),
allocs.c.resource_class_id,
allocs.c.used,
consumer.c.id.label("consumer_id"),
consumer.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumer.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
project.c.id.label("project_id"),
project.c.external_id.label("project_external_id"),
user.c.id.label("user_id"),
user.c.external_id.label("user_external_id"),
allocs.c.created_at,
allocs.c.updated_at,
]
# Build up the joins of the five tables we need to interact with.
rp_join = sa.join(allocs, rp, allocs.c.resource_provider_id == rp.c.id)
consumer_join = sa.join(rp_join, consumer,
allocs.c.consumer_id == consumer.c.uuid)
project_join = sa.join(consumer_join, project,
consumer.c.project_id == project.c.id)
user_join = sa.join(project_join, user,
consumer.c.user_id == user.c.id)
sel = sa.select(cols).select_from(user_join)
sel = sel.where(allocs.c.consumer_id == consumer_uuid)
return [dict(r) for r in ctx.session.execute(sel)]
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumers_for_provider(ctx, rp_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for any allocation on the supplied provider
internal ID, using the "incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
# Do a single INSERT for all missing consumer relationships for the
# provider
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
cols = [
_ALLOC_TBL.c.consumer_id,
incomplete_proj_id,
incomplete_user_id,
]
sel = sa.select(cols)
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
# NOTE(mnaser): It is possible to have multiple consumers having many
# allocations to the same resource provider, which would
# make the INSERT FROM SELECT fail due to duplicates.
sel = sel.group_by(_ALLOC_TBL.c.consumer_id)
target_cols = ['uuid', 'project_id', 'user_id']
ins_stmt = consumer_obj.CONSUMER_TBL.insert().from_select(
target_cols, sel)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for resource provider %s has been run. Migrated %d "
"incomplete consumer records on the fly.", rp_id,
res.rowcount)
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumer(ctx, consumer_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for the supplied consumer UUID, using the
"incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.consumer_id == consumer_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
ins_stmt = consumer_obj.CONSUMER_TBL.insert().values(
uuid=consumer_id, project_id=incomplete_proj_id,
user_id=incomplete_user_id)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for consumer %s has been run. Migrated %d incomplete "
"consumer records on the fly.", consumer_id, res.rowcount)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@db_api.placement_context_manager.writer
def _set_allocations(context, allocs):
"""Write a set of allocations.
We must check that there is capacity for each allocation.
If there is not we roll back the entire set.
    :raises `exception.ResourceClassNotFound` if any resource class in any
            allocation in allocs cannot be found in the DB.
:raises `exception.InvalidAllocationCapacityExceeded` if any inventory
would be exhausted by the allocation.
:raises `InvalidAllocationConstraintsViolated` if any of the
`step_size`, `min_unit` or `max_unit` constraints in an
inventory will be violated by any one of the allocations.
:raises `ConcurrentUpdateDetected` if a generation for a resource
provider or consumer failed its increment check.
"""
# First delete any existing allocations for any consumers. This
# provides a clean slate for the consumers mentioned in the list of
# allocations being manipulated.
consumer_ids = set(alloc.consumer.uuid for alloc in allocs)
for consumer_id in consumer_ids:
_delete_allocations_for_consumer(context, consumer_id)
# Before writing any allocation records, we check that the submitted
# allocations do not cause any inventory capacity to be exceeded for
# any resource provider and resource class involved in the allocation
# transaction. _check_capacity_exceeded() raises an exception if any
    # inventory capacity is exceeded. If capacity is not exceeded, the
# function returns a list of ResourceProvider objects containing the
# generation of the resource provider at the time of the check. These
# objects are used at the end of the allocation transaction as a guard
# against concurrent updates.
#
# Don't check capacity when alloc.used is zero. Zero is not a valid
# amount when making an allocation (the minimum consumption of a
# resource is one) but is used in this method to indicate a need for
    # removal. Providing 0 is controlled at the HTTP API layer where PUT
    # /allocations does not allow empty allocations. When POST /allocations
    # is implemented it will allow zero for the special case of atomically
    # setting and removing different allocations in the same request.
    # _check_capacity_exceeded will raise a ResourceClassNotFound if any
    # allocation is using a resource class that does not exist.
visited_consumers = {}
visited_rps = _check_capacity_exceeded(context, allocs)
for alloc in allocs:
if alloc.consumer.id not in visited_consumers:
visited_consumers[alloc.consumer.id] = alloc.consumer
# If alloc.used is set to zero that is a signal that we don't want
# to (re-)create any allocations for this resource class.
# _delete_current_allocs has already wiped out allocations so just
# continue
if alloc.used == 0:
continue
consumer_id = alloc.consumer.uuid
rp = alloc.resource_provider
rc_id = rc_cache.RC_CACHE.id_from_string(alloc.resource_class)
ins_stmt = _ALLOC_TBL.insert().values(
resource_provider_id=rp.id,
resource_class_id=rc_id,
consumer_id=consumer_id,
used=alloc.used)
res = context.session.execute(ins_stmt)
alloc.id = res.lastrowid
# Generation checking happens here. If the inventory for this resource
# provider changed out from under us, this will raise a
# ConcurrentUpdateDetected which can be caught by the caller to choose
# to try again. It will also rollback the transaction so that these
# changes always happen atomically.
for rp in visited_rps.values():
rp.increment_generation()
for consumer in visited_consumers.values():
consumer.increment_generation()
# If any consumers involved in this transaction ended up having no
# allocations, delete the consumer records. Exclude consumers that had
# *some resource* in the allocation list with a total > 0 since clearly
# those consumers have allocations...
cons_with_allocs = set(a.consumer.uuid for a in allocs if a.used > 0)
all_cons = set(c.uuid for c in visited_consumers.values())
consumers_to_check = all_cons - cons_with_allocs
consumer_obj.delete_consumers_if_no_allocations(
context, consumers_to_check)
def get_all_by_resource_provider(context, rp):
_create_incomplete_consumers_for_provider(context, rp.id)
db_allocs = _get_allocations_by_provider_id(context, rp.id)
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
# _get_allocations_by_provider_id(). We already have the
# ResourceProvider object so we just pass that object to the Allocation
# object constructor as-is
objs = []
for rec in db_allocs:
consumer = consumer_obj.Consumer(
context, id=rec['consumer_id'],
uuid=rec['consumer_uuid'],
generation=rec['consumer_generation'],
project=project_obj.Project(
context, id=rec['project_id'],
external_id=rec['project_external_id']),
user=user_obj.User(
context, id=rec['user_id'],
external_id=rec['user_external_id']))
objs.append(
Allocation(
id=rec['id'], resource_provider=rp,
resource_class=rc_cache.RC_CACHE.string_from_id(
rec['resource_class_id']),
consumer=consumer,
used=rec['used'],
created_at=rec['created_at'],
updated_at=rec['updated_at']))
return objs
def get_all_by_consumer_id(context, consumer_id):
_create_incomplete_consumer(context, consumer_id)
db_allocs = _get_allocations_by_consumer_uuid(context, consumer_id)
if not db_allocs:
return []
# Build up the Consumer object (it's the same for all allocations
# since we looked up by consumer ID)
db_first = db_allocs[0]
consumer = consumer_obj.Consumer(
context, id=db_first['consumer_id'],
uuid=db_first['consumer_uuid'],
generation=db_first['consumer_generation'],
project=project_obj.Project(
context, id=db_first['project_id'],
external_id=db_first['project_external_id']),
user=user_obj.User(
context, id=db_first['user_id'],
external_id=db_first['user_external_id']))
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
# _get_allocations_by_consumer_id().
#
# NOTE(jaypipes): Unlike with get_all_by_resource_provider(), we do
# NOT already have the ResourceProvider object so we construct a new
# ResourceProvider object below by looking at the resource provider
# fields returned by _get_allocations_by_consumer_id().
alloc_list = [
Allocation(
id=rec['id'],
resource_provider=rp_obj.ResourceProvider(
context,
id=rec['resource_provider_id'],
uuid=rec['resource_provider_uuid'],
name=rec['resource_provider_name'],
generation=rec['resource_provider_generation']),
resource_class=rc_cache.RC_CACHE.string_from_id(
rec['resource_class_id']),
consumer=consumer,
used=rec['used'],
created_at=rec['created_at'],
updated_at=rec['updated_at'])
for rec in db_allocs
]
return alloc_list
def replace_all(context, alloc_list):
"""Replace the supplied allocations.
:note: This method always deletes all allocations for all consumers
referenced in the list of Allocation objects and then replaces
the consumer's allocations with the Allocation objects. In doing
so, it will end up setting the Allocation.id attribute of each
Allocation object.
"""
# Retry _set_allocations server side if there is a
# ResourceProviderConcurrentUpdateDetected. We don't care about
# sleeping, we simply want to reset the resource provider objects
# and try again. For sake of simplicity (and because we don't have
# easy access to the information) we reload all the resource
# providers that may be present.
retries = RP_CONFLICT_RETRY_COUNT
while retries:
retries -= 1
try:
_set_allocations(context, alloc_list)
break
except exception.ResourceProviderConcurrentUpdateDetected:
LOG.debug('Retrying allocations write on resource provider '
'generation conflict')
# We only want to reload each unique resource provider once.
alloc_rp_uuids = set(
alloc.resource_provider.uuid for alloc in alloc_list)
seen_rps = {}
for rp_uuid in alloc_rp_uuids:
seen_rps[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(
context, rp_uuid)
for alloc in alloc_list:
rp_uuid = alloc.resource_provider.uuid
alloc.resource_provider = seen_rps[rp_uuid]
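    # The else below belongs to the while loop: it runs only when the loop
    # exhausts its retries without hitting `break`.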
else:
# We ran out of retries so we need to raise again.
# The log will automatically have request id info associated with
# it that will allow tracing back to specific allocations.
# Attempting to extract specific consumer or resource provider
# information from the allocations is not coherent as this
# could be multiple consumers and providers.
LOG.warning('Exceeded retry limit of %d on allocations write',
RP_CONFLICT_RETRY_COUNT)
raise exception.ResourceProviderConcurrentUpdateDetected()
def delete_all(context, alloc_list):
consumer_uuids = set(alloc.consumer.uuid for alloc in alloc_list)
alloc_ids = [alloc.id for alloc in alloc_list]
_delete_allocations_by_ids(context, alloc_ids)
consumer_obj.delete_consumers_if_no_allocations(
context, consumer_uuids)
avg_line_length: 43.363344 | max_line_length: 79 | alphanum_fraction: 0.671622
hexsha: 3646b926f12f2186bbb7023c94eabfc5c300fb9f | size: 9,143 | ext: py | lang: Python
max_stars: bcipy/acquisition/protocols/lsl/lsl_connector.py @ mberkanbicer/BciPy (head c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a) | licenses: ["MIT"] | count: 32 | events: 2020-11-13T17:53:25.000Z .. 2022-03-24T21:12:31.000Z
max_issues: bcipy/acquisition/protocols/lsl/lsl_connector.py @ mberkanbicer/BciPy (head c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a) | licenses: ["MIT"] | count: 20 | events: 2020-12-02T17:40:42.000Z .. 2022-03-16T16:38:05.000Z
max_forks: bcipy/acquisition/protocols/lsl/lsl_connector.py @ mberkanbicer/BciPy (head c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a) | licenses: ["MIT"] | count: 10 | events: 2020-12-16T02:32:37.000Z .. 2022-03-23T16:31:59.000Z
content:
# pylint: disable=fixme
"""Defines the driver for the Device for communicating with
LabStreamingLayer (LSL)."""
import logging
from typing import List, Dict, Any
import pylsl
from bcipy.acquisition.protocols.connector import Connector
from bcipy.acquisition.connection_method import ConnectionMethod
from bcipy.acquisition.devices import DeviceSpec
log = logging.getLogger(__name__)
LSL_TIMESTAMP = 'LSL_timestamp'
class Marker():
"""Data class which wraps a LSL marker; data pulled from a marker stream is
a tuple where the first item is a list of channels and second item is the
timestamp. Assumes that marker inlet only has a single channel."""
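    # A pulled marker might look like (['calibration_trigger'], 193.402),
    # i.e. a hypothetical single-channel value plus its LSL timestamp.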
def __init__(self, data=(None, None)):
super(Marker, self).__init__()
self.channels, self.timestamp = data
@classmethod
def empty(cls):
"""Creates an empty Marker."""
return Marker()
def __repr__(self):
return f"<value: {self.trg}, timestamp: {self.timestamp}>"
@property
def is_empty(self):
"""Test to see if the current marker is empty."""
return self.channels is None or self.timestamp is None
@property
def trg(self):
"""Get the trigger."""
# pylint: disable=unsubscriptable-object
return self.channels[0] if self.channels else None
def inlet_name(inlet) -> str:
"""Returns the name of a pylsl streamInlet."""
name = '_'.join(inlet.info().name().split())
return name.replace('-', '_')
def channel_names(stream_info: pylsl.StreamInfo) -> List[str]:
"""Extracts the channel names from the LSL Stream metadata."""
channels = []
if stream_info.desc().child("channels").empty():
return channels
channel = stream_info.desc().child("channels").child("channel")
for _ in range(stream_info.channel_count()):
channel_name = channel.child_value("label")
channels.append(channel_name)
channel = channel.next_sibling()
return channels
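# channel_names above assumes the conventional LSL metadata layout, e.g.:
# <channels><channel><label>Fp1</label></channel>...</channels>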
def rename_items(items: List[str], rules: Dict[str, str]) -> None:
"""Renames items based on the provided rules.
Parameters
----------
items - list of items ; values will be mutated
rules - change key -> value
"""
for key, val in rules.items():
if key in items:
items[items.index(key)] = val
class LslConnector(Connector):
"""Connects to any device streaming data through the LabStreamingLayer lib.
Parameters
----------
connection_params : dict
parameters used to connect with the server.
device_spec : DeviceSpec
details about the device data that is being streamed over LSL.
include_lsl_timestamp: bool, optional
if True, appends the LSL_timestamp to each sample.
include_marker_streams : bool, optional
if True, listens for marker streams and merges them with the data
stream based on its LSL timestamp. The additional columns use the
stream names.
rename_rules : dict, optional
rules for renaming channels
"""
# pylint: disable=too-many-instance-attributes,too-many-arguments
    def __init__(self,
                 connection_params: Dict[str, Any] = None,
                 device_spec: DeviceSpec = None,
                 include_lsl_timestamp: bool = False,
                 include_marker_streams: bool = False,
                 rename_rules: Dict[str, str] = None):
        # Avoid a mutable default argument for connection_params.
        super(LslConnector, self).__init__(connection_params or {}, device_spec)
assert device_spec, "DeviceSpec is required"
self._appended_channels = []
if include_lsl_timestamp:
self._appended_channels.append(LSL_TIMESTAMP)
self._inlet = None
self._marker_inlets = []
self.include_marker_streams = include_marker_streams
self.rename_rules = rename_rules or {}
# There can be 1 current marker for each marker channel.
self.current_markers = {}
@classmethod
def supports(cls, device_spec: DeviceSpec,
connection_method: ConnectionMethod) -> bool:
# The content_type requirement can be relaxed if the `connect` method is refactored
# to resolve the LSL stream based on the device_spec.
return connection_method == ConnectionMethod.LSL
@property
def name(self):
if 'stream_name' in self.connection_params:
return self.connection_params['stream_name']
if self._inlet and self._inlet.info().name():
return self._inlet.info().name()
return self.device_spec.name
def connect(self):
"""Connect to the data source."""
# Streams can be queried by name, type (xdf file format spec), and
# other metadata.
# NOTE: According to the documentation this is a blocking call that can
# only be performed on the main thread in Linux systems. So far testing
# seems fine when done in a separate multiprocessing.Process.
eeg_streams = pylsl.resolve_stream('type',
self.device_spec.content_type)
marker_streams = pylsl.resolve_stream(
'type', 'Markers') if self.include_marker_streams else []
assert eeg_streams, f"One or more {self.device_spec.content_type} streams must be present"
self._inlet = pylsl.StreamInlet(eeg_streams[0])
self._marker_inlets = [
pylsl.StreamInlet(inlet) for inlet in marker_streams
]
# initialize the current_markers for each marker stream.
for inlet in self._marker_inlets:
self.current_markers[inlet_name(inlet)] = Marker.empty()
def acquisition_init(self):
"""Initialization step. Reads the channel and data rate information
sent by the server and sets the appropriate instance variables.
"""
assert self._inlet is not None, "Connect call is required."
metadata = self._inlet.info()
self.log_info(metadata)
channels = channel_names(metadata)
# Confirm that provided channels match metadata, or meta is empty.
if channels and self.device_spec.channels != channels:
print(f"device channels: {channels}")
print(self.device_spec.channels)
raise Exception(f"Channels read from the device do not match "
"the provided parameters.")
assert len(self.device_spec.channels) == metadata.channel_count(
), "Channel count error"
if self.device_spec.sample_rate != metadata.nominal_srate():
raise Exception("Sample frequency read from device does not match "
"the provided parameter")
rename_items(self.channels, self.rename_rules)
self.channels.extend(self._appended_channels)
self.channels.extend(self.marker_stream_names())
assert len(self.channels) == len(set(
self.channels)), "Duplicate channel names are not allowed"
def log_info(self, metadata: pylsl.StreamInfo) -> None:
"""Log information about the current connections."""
log.debug(metadata.as_xml())
for marker_inlet in self._marker_inlets:
log.debug("Streaming from marker inlet: %s",
inlet_name(marker_inlet))
def marker_stream_names(self) -> List[str]:
return list(map(inlet_name, self._marker_inlets))
def read_data(self):
"""Reads the next packet and returns the sensor data.
Returns
-------
list with an item for each channel.
"""
sample, timestamp = self._inlet.pull_sample()
# Useful for debugging.
if LSL_TIMESTAMP in self._appended_channels:
sample.append(timestamp)
for marker_inlet in self._marker_inlets:
name = inlet_name(marker_inlet)
            marker = self.current_markers.get(name, Marker.empty())
# Only attempt to retrieve a marker from the inlet if we have
# merged the last one with a sample.
if marker.is_empty:
# A timeout of 0.0 only returns a sample if one is buffered for
# immediate pickup. Without a timeout, this is a blocking call.
marker_data = marker_inlet.pull_sample(timeout=0.0)
marker = Marker(marker_data)
self.current_markers[name] = marker
if not marker.is_empty:
log.debug(
"Read marker %s from %s; current sample time: %s",
marker, name, timestamp)
trg = "0"
if not marker.is_empty and timestamp >= marker.timestamp:
trg = marker.trg
log.debug(("Appending %s marker %s to sample at time %s; ",
"time diff: %s"), name, marker, timestamp,
timestamp - marker.timestamp)
self.current_markers[name] = Marker.empty() # clear current
# Add marker field to sample
sample.append(trg)
return sample
avg_line_length: 37.937759 | max_line_length: 98 | alphanum_fraction: 0.631084
hexsha: 119b24f7e581ff5f9dd8ad6ea105caaa42a9c5c6 | size: 455 | ext: py | lang: Python
max_stars: day-06/part-2/loic.py @ lypnol/adventofcode-2017 (head 03ced3df3eb80e5c7965c4120e3932919067cb15) | licenses: ["MIT"] | count: 16 | events: 2017-12-02T11:56:25.000Z .. 2018-02-10T15:09:23.000Z
max_issues: day-06/part-2/loic.py @ lypnol/adventofcode-2017 (head 03ced3df3eb80e5c7965c4120e3932919067cb15) | licenses: ["MIT"] | count: 19 | events: 2017-12-01T07:54:22.000Z .. 2017-12-19T17:41:02.000Z
max_forks: day-06/part-2/loic.py @ lypnol/adventofcode-2017 (head 03ced3df3eb80e5c7965c4120e3932919067cb15) | licenses: ["MIT"] | count: 4 | events: 2017-12-04T23:58:12.000Z .. 2018-02-01T08:53:16.000Z
content:
from submission import Submission
class LoicSubmission(Submission):
def run(self, s):
d = [int(x) for x in s.split("\t")]
t = {}
compteur = 0
        while tuple(d) not in t:
t[tuple(d)] = compteur
compteur += 1
buffer = max(d)
i = d.index(buffer)
d = self.data_update(d, i)
return compteur - t[tuple(d)]
def data_update(self, d, i):
c = d[i]
d[i] = 0
for k in range(0, c):
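            # NOTE: 16 hardcodes the bank count of this puzzle's input;
            # (i + 1) % len(d) would generalize.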
i = (i + 1) % 16
d[i] += 1
return d
avg_line_length: 17.5 | max_line_length: 37 | alphanum_fraction: 0.569231
hexsha: 177595074e038a6ad903261608c35330f2e36c77 | size: 923 | ext: py | lang: Python
max_stars: bathroom/c.py @ mahasak/codejam-2017 (head d04bc16602b0babc4995b05def695d245e3ef490) | licenses: ["MIT"] | count: null | events: null .. null
max_issues: bathroom/c.py @ mahasak/codejam-2017 (head d04bc16602b0babc4995b05def695d245e3ef490) | licenses: ["MIT"] | count: null | events: null .. null
max_forks: bathroom/c.py @ mahasak/codejam-2017 (head d04bc16602b0babc4995b05def695d245e3ef490) | licenses: ["MIT"] | count: null | events: null .. null
content:
import sys
def solve(n,k):
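    # Sketch of the idea: person k in a row of n stalls lands in a
    # sub-problem of roughly half the size, so recurse on halved (n, k)
    # (rounded per parity) until k == 1, then split the remaining gap
    # into the [max, min] distances returned.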
if k == 1:
if n == 1:
return [0,0]
else:
(l,x) = divmod(n-1, 2)
if x == 0:
m = l
else:
m = l + 1
            return [m, l]
else:
(n1,m1) = divmod(n-1, 2)
(k1,m2) = divmod(k-1, 2)
if m1 == 0:
n2 = n1
else:
n2 = n1 + 1
if m2 == 0:
k2 = k1
else:
k2 = k1 + 1
        if n1 == n2:
            print([n1, k2])
            return solve(n1, k2)
        else:
            if k1 == k2:
                print([n1, k1])
                return solve(n1, k1)
            else:
                print([n2, k2])
                return solve(n2, k2)
name = "C-large"
path = ""
f = open(name + ".in", 'r')
o = open(name + ".out", 'w')
T = int(f.readline().strip())
for t in range(T):
    line = f.readline().strip().split(' ')
    print(line)
    (n, k) = map(int, line)
    res = solve(n, k)
    s = "Case #%d: %d %d\n" % (t + 1, res[0], res[1])
    print(s)
    o.write(s)
avg_line_length: 17.092593 | max_line_length: 52 | alphanum_fraction: 0.419285
hexsha: 1704a40681413e074c4882fa4b34f8f11861354b | size: 2,872 | ext: py | lang: Python
max_stars: tests/test_xonfig.py @ con-f-use/xonsh (head bff0794b68f0d9266d8d920f10170c982927ccf6) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
max_issues: tests/test_xonfig.py @ con-f-use/xonsh (head bff0794b68f0d9266d8d920f10170c982927ccf6) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
max_forks: tests/test_xonfig.py @ con-f-use/xonsh (head bff0794b68f0d9266d8d920f10170c982927ccf6) | licenses: ["BSD-2-Clause-FreeBSD"] | count: null | events: null .. null
content:
"""Tests the xonfig command.
Actually, just a down payment on a full test.
Currently exercises only these options:
- xonfig info
- xonfig jupyter_kernel
"""
import io
import re
import pytest # noqa F401
from xonsh.webconfig import main as web_main
from xonsh.xonfig import xonfig_main
def test_xonfig_help(capsys, xession):
"""verify can invoke it, and usage knows about all the options"""
with pytest.raises(SystemExit):
xonfig_main(["-h"])
capout = capsys.readouterr().out
pat = re.compile(r"^usage:\s*xonfig[^\n]*{([\w,-]+)}", re.MULTILINE)
m = pat.match(capout)
assert m[1]
verbs = {v.strip().lower() for v in m[1].split(",")}
assert verbs == {
"info",
"styles",
"wizard",
"web",
"colors",
"tutorial",
}
@pytest.fixture
def request_factory():
class MockSocket:
def getsockname(self):
return ("sockname",)
def sendall(self, data):
self.data = data
class MockRequest:
_sock = MockSocket()
def __init__(self, path: str, method: str):
self._path = path
self.data = b""
self.method = method.upper()
def makefile(self, *args, **kwargs):
if args[0] == "rb":
return io.BytesIO(f"{self.method} {self._path} HTTP/1.0".encode())
elif args[0] == "wb":
return io.BytesIO(b"")
else:
raise ValueError("Unknown file type to make", args, kwargs)
def sendall(self, data):
self.data = data
return MockRequest
@pytest.fixture
def get_req(request_factory):
from urllib import parse
def factory(path, data: "dict[str, str]|None" = None):
if data:
path = path + "?" + parse.urlencode(data)
request = request_factory(path, "get")
handle = web_main.XonshConfigHTTPRequestHandler(request, (0, 0), None)
return request, handle, request.data.decode()
return factory
class TestXonfigWeb:
def test_colors_get(self, get_req):
_, _, resp = get_req("/")
assert "Colors" in resp
def test_xontribs_get(self, get_req):
_, _, resp = get_req("/xontribs")
assert "Xontribs" in resp
def test_prompts_get(self, get_req):
_, _, resp = get_req("/prompts")
assert "Prompts" in resp
@pytest.mark.parametrize(
"args",
[
([]),
(
[
"info",
]
),
],
)
def test_xonfig_info(args, xession):
"""info works, and reports no jupyter if none in environment"""
capout = xonfig_main(args)
assert capout.startswith("+---")
assert capout.endswith("---+\n")
pat = re.compile(r".*history backend\s+\|\s+", re.MULTILINE | re.IGNORECASE)
m = pat.search(capout)
assert m
avg_line_length: 25.192982 | max_line_length: 82 | alphanum_fraction: 0.572772
hexsha: 0b424e1c4c035643aad07512c0b913ddcb3d2306 | size: 6,475 | ext: py | lang: Python
max_stars: rl_utils.py @ iwangjian/ByteCup2018 (head c59c6a495f81c493eaaf7fda710c8acd7ef148b9) | licenses: ["MIT"] | count: 80 | events: 2018-09-08T01:11:36.000Z .. 2022-01-18T13:41:30.000Z
max_issues: rl_utils.py @ Whoolly/ByteCup2018 (head 348bdee3215c146ef7d6e4fe1fecbe4598798c8a) | licenses: ["MIT"] | count: 3 | events: 2018-12-02T15:08:05.000Z .. 2020-02-10T04:11:28.000Z
max_forks: rl_utils.py @ Whoolly/ByteCup2018 (head 348bdee3215c146ef7d6e4fe1fecbe4598798c8a) | licenses: ["MIT"] | count: 21 | events: 2018-10-27T07:40:25.000Z .. 2022-03-28T12:30:01.000Z
content:
""" RL training utilities"""
import math
from time import time
from datetime import timedelta
from cytoolz import concat
import numpy as np
import torch
from torch.nn import functional as F
from torch import autograd
from torch.nn.utils import clip_grad_norm_
from metric import compute_rouge_l, compute_rouge_n
from training import BasicPipeline
def a2c_validate(agent, abstractor, loader):
agent.eval()
start = time()
print('start running validation...', end='')
avg_reward = 0
i = 0
with torch.no_grad():
for art_batch, abs_batch in loader:
ext_sents = []
ext_inds = []
for raw_arts in art_batch:
indices = agent(raw_arts)
# ext_inds += [(len(ext_sents), len(indices)-1)]
ext_inds += [(len(ext_sents), len(indices))]
ext_sents += [raw_arts[idx.item()]
for idx in indices if idx.item() < len(raw_arts)]
all_summs = abstractor(ext_sents)
for (j, n), abs_sents in zip(ext_inds, abs_batch):
summs = all_summs[j:j+n]
# python ROUGE-1 (not official evaluation)
avg_reward += compute_rouge_n(list(concat(summs)),
list(concat(abs_sents)), n=1)
i += 1
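    # dividing by i/100 reports the mean ROUGE reward as a percentage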
avg_reward /= (i/100)
print('finished in {}! avg reward: {:.2f}'.format(
timedelta(seconds=int(time()-start)), avg_reward))
return {'reward': avg_reward}
def a2c_train_step(agent, abstractor, loader, opt, grad_fn,
gamma=0.99, reward_fn=compute_rouge_l,
stop_reward_fn=compute_rouge_n(n=1), stop_coeff=1.0):
opt.zero_grad()
indices = []
probs = []
baselines = []
ext_sents = []
art_batch, abs_batch = next(loader)
for raw_arts in art_batch:
(inds, ms), bs = agent(raw_arts)
baselines.append(bs)
indices.append(inds)
probs.append(ms)
ext_sents += [raw_arts[idx.item()]
for idx in inds if idx.item() < len(raw_arts)]
with torch.no_grad():
summaries = abstractor(ext_sents)
i = 0
rewards = []
avg_reward = 0
for inds, abss in zip(indices, abs_batch):
rs = ([reward_fn(summaries[i+j], abss[j])
# for j in range(min(len(inds)-1, len(abss)))]
for j in range(min(len(inds), len(abss)))]
# + [0 for _ in range(max(0, len(inds)-1-len(abss)))]
# + [stop_coeff*stop_reward_fn(
# list(concat(summaries[i:i+len(inds)-1])),
# list(concat(abss)))]
)
assert len(rs) == len(inds)
# avg_reward += rs[-1]/stop_coeff
avg_reward += rs[-1]
# i += len(inds)-1
i += len(inds)
# compute discounted rewards
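        # e.g. rs = [1, 0, 2] with gamma = 0.99 yields
        # disc_rs = [2.9602, 1.98, 2] (illustrative values)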
R = 0
disc_rs = []
for r in rs[::-1]:
R = r + gamma * R
disc_rs.insert(0, R)
rewards += disc_rs
indices = list(concat(indices))
probs = list(concat(probs))
baselines = list(concat(baselines))
# standardize rewards
reward = torch.Tensor(rewards).to(baselines[0].device)
reward = (reward - reward.mean()) / (
reward.std() + float(np.finfo(np.float32).eps))
baseline = torch.cat(baselines).squeeze()
avg_advantage = 0
losses = []
for action, p, r, b in zip(indices, probs, reward, baseline):
advantage = r - b
avg_advantage += advantage
losses.append(-p.log_prob(action)
* (advantage/len(indices))) # divide by T*B
critic_loss = F.mse_loss(baseline, reward)
# backprop and update
autograd.backward(
[critic_loss.unsqueeze(0)] + losses,
[torch.ones(1).to(critic_loss.device)]*(1+len(losses))
)
grad_log = grad_fn()
opt.step()
log_dict = {}
log_dict.update(grad_log)
log_dict['reward'] = avg_reward/len(art_batch)
log_dict['advantage'] = avg_advantage.item()/len(indices)
log_dict['mse'] = critic_loss.item()
assert not math.isnan(log_dict['grad_norm'])
return log_dict
def get_grad_fn(agent, clip_grad, max_grad=1e2):
""" monitor gradient for each sub-component"""
params = [p for p in agent.parameters()]
def f():
grad_log = {}
for n, m in agent.named_children():
tot_grad = 0
for p in m.parameters():
if p.grad is not None:
tot_grad += p.grad.norm(2) ** 2
tot_grad = tot_grad ** (1/2)
grad_log['grad_norm'+n] = tot_grad.item()
grad_norm = clip_grad_norm_(
[p for p in params if p.requires_grad], clip_grad)
# grad_norm = grad_norm.item()
if max_grad is not None and grad_norm >= max_grad:
print('WARNING: Exploding Gradients {:.2f}'.format(grad_norm))
grad_norm = max_grad
grad_log['grad_norm'] = grad_norm
return grad_log
return f
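# A minimal usage sketch (hypothetical values; not part of the original module):
#   grad_fn = get_grad_fn(agent, clip_grad=2.0)
#   loss.backward()
#   log = grad_fn()   # clips grads, returns {'grad_norm': ..., 'grad_norm<name>': ...}
#   opt.step()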
class A2CPipeline(BasicPipeline):
def __init__(self, name,
net, abstractor,
train_batcher, val_batcher,
optim, grad_fn,
reward_fn, gamma,
stop_reward_fn, stop_coeff):
self.name = name
self._net = net
self._train_batcher = train_batcher
self._val_batcher = val_batcher
self._opt = optim
self._grad_fn = grad_fn
self._abstractor = abstractor
self._gamma = gamma
self._reward_fn = reward_fn
self._stop_reward_fn = stop_reward_fn
self._stop_coeff = stop_coeff
self._n_epoch = 0 # epoch not very useful?
def batches(self):
raise NotImplementedError('A2C does not use batcher')
def train_step(self):
# forward pass of model
self._net.train()
log_dict = a2c_train_step(
self._net, self._abstractor,
self._train_batcher,
self._opt, self._grad_fn,
self._gamma, self._reward_fn,
self._stop_reward_fn, self._stop_coeff
)
return log_dict
def validate(self):
return a2c_validate(self._net, self._abstractor, self._val_batcher)
def checkpoint(self, *args, **kwargs):
# explicitly use inherited function in case I forgot :)
return super().checkpoint(*args, **kwargs)
def terminate(self):
pass # No extra processs so do nothing
| 34.078947
| 79
| 0.57668
|
7563837b24e426cdfacd02440da5c55fc194bba4
| 1,223
|
py
|
Python
|
setup.py
|
remingu/napalm-gaia
|
7c8914b38d57f7e3274d0c07ae710eebdee59252
|
[
"Apache-2.0"
] | 2
|
2020-03-06T01:38:05.000Z
|
2020-06-03T23:52:15.000Z
|
setup.py
|
remingu/napalm-gaia
|
7c8914b38d57f7e3274d0c07ae710eebdee59252
|
[
"Apache-2.0"
] | 2
|
2020-02-28T14:00:09.000Z
|
2020-03-18T23:19:57.000Z
|
setup.py
|
remingu/napalm-gaia
|
7c8914b38d57f7e3274d0c07ae710eebdee59252
|
[
"Apache-2.0"
] | 2
|
2020-04-22T14:32:06.000Z
|
2021-02-19T02:06:08.000Z
|
from setuptools import setup, find_packages
from os import path
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported')
lpath = path.abspath(path.dirname(__file__))
with open(path.join(lpath, 'README.md'), encoding='utf-8') as fh:
long_description = fh.read()
with open(path.join(lpath, 'requirements.txt'), "r") as fh:
reqs = [r for r in fh.read().splitlines() if len(r) > 0]
setup(
name='napalm-gaia',
version='0.0.15',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
py_modules=['gaiaos'],
url='https://github.com/remingu/napalm-gaia',
license='Apache 2.0',
    author='Daniel Schlifka(remingu), Pavel Smejkal(mbtathcx)',
    author_email='Daniel Schlifka <remingu@techturn.de>, Pavel Smejkal <sm3jkal@centrum.cz>',
description='napalm driver plugin for checkpoint gaia-os',
install_requires=reqs,
keywords='development napalm checkpoint gaia ',
python_requires='>=3.6',
project_urls={
'Bug Reports': 'https://github.com/remingu/napalm-gaia/issues',
'Source': 'https://github.com/remingu/napalm-gaia',
},
include_package_data=True,
)
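# Standard setuptools usage from a checkout (illustrative):
#   pip install .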
| 33.054054
| 92
| 0.690106
|
58294d1676c0ef0dac39744037a872b31e8afeaa
| 1,226
|
py
|
Python
|
pygaggle/data/convert_monot5_output_to_msmarco_run.py
|
Elfsong/pygaggle
|
2bb2f0eb9cc34ac626ffb7648b73b4695468eba7
|
[
"Apache-2.0"
] | 166
|
2020-04-24T10:15:34.000Z
|
2022-03-29T23:00:58.000Z
|
pygaggle/data/convert_monot5_output_to_msmarco_run.py
|
Elfsong/pygaggle
|
2bb2f0eb9cc34ac626ffb7648b73b4695468eba7
|
[
"Apache-2.0"
] | 151
|
2020-04-25T16:40:15.000Z
|
2022-03-12T01:00:02.000Z
|
pygaggle/data/convert_monot5_output_to_msmarco_run.py
|
Elfsong/pygaggle
|
2bb2f0eb9cc34ac626ffb7648b73b4695468eba7
|
[
"Apache-2.0"
] | 100
|
2020-04-24T20:53:13.000Z
|
2022-03-22T21:29:02.000Z
|
"""
This script convert monoT5 output file to msmarco run file
"""
import collections
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--t5_output", type=str, required=True,
help="tsv file with two columns, <label> and <score>")
parser.add_argument("--t5_output_ids", type=str, required=True,
help="tsv file with two columns, <query_id> and <doc_id>")
parser.add_argument("--mono_run", type=str, required=True,
help="path to output mono run, tsv file, with <query_id>, <doc_id> and <rank>")
args = parser.parse_args()
examples = collections.defaultdict(list)
with open(args.t5_output_ids) as f_gt, open(args.t5_output) as f_pred:
for line_gt, line_pred in zip(f_gt, f_pred):
query_id, doc_id = line_gt.strip().split('\t')
_, score = line_pred.strip().split('\t')
score = float(score)
examples[query_id].append((doc_id, score))
with open(args.mono_run, 'w') as fout:
for query_id, doc_ids_scores in examples.items():
doc_ids_scores.sort(key=lambda x: x[1], reverse=True)
for rank, (doc_id, _) in enumerate(doc_ids_scores):
fout.write(f'{query_id}\t{doc_id}\t{rank + 1}\n')
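# A hypothetical invocation (file names illustrative, not from the repo):
#   python convert_monot5_output_to_msmarco_run.py \
#       --t5_output preds.tsv --t5_output_ids query_doc_ids.tsv --mono_run run.monot5.tsv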
| 42.275862
| 99
| 0.663132
|
15a518c3791bf856c10ea93eab9f956e9767b6f5
| 2,484
|
py
|
Python
|
dask_cuml/tests/test_linear_regression.py
|
deltadu/dask-cuml
|
bafe79addf6d5e98401e990cc08e1ba176ba4362
|
[
"Apache-2.0"
] | 18
|
2019-03-08T05:26:50.000Z
|
2021-11-08T12:09:41.000Z
|
dask_cuml/tests/test_linear_regression.py
|
deltadu/dask-cuml
|
bafe79addf6d5e98401e990cc08e1ba176ba4362
|
[
"Apache-2.0"
] | 37
|
2019-03-08T01:18:02.000Z
|
2019-07-26T20:04:31.000Z
|
dask_cuml/tests/test_linear_regression.py
|
deltadu/dask-cuml
|
bafe79addf6d5e98401e990cc08e1ba176ba4362
|
[
"Apache-2.0"
] | 16
|
2019-03-07T23:50:00.000Z
|
2020-09-30T19:13:11.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import pytest
from sklearn.metrics import mean_squared_error
import pandas as pd
import gzip
import numpy as np
import os
def load_data(nrows, ncols, cached='data/mortgage.npy.gz'):
    # Loading into pandas so that no cluster is created before LocalCUDACluster
if os.path.exists(cached):
print('use mortgage data')
with gzip.open(cached) as f:
X = np.load(f)
        # the 4th column is 'adj_remaining_months_to_maturity'
        # used as the label, so extract it before dropping that column
        y = X[:, 4:5]
        X = X[:, [i for i in range(X.shape[1]) if i != 4]]
rindices = np.random.randint(0, X.shape[0]-1, nrows)
X = X[rindices, :ncols]
y = y[rindices]
else:
print('use random data')
        X = np.random.rand(nrows, ncols)
        y = np.random.rand(nrows, 1)  # y must also be defined on this branch
df_X = pd.DataFrame({'fea%d' % i: X[:, i] for i in range(X.shape[1])})
df_y = pd.DataFrame({'fea%d' % i: y[:, i] for i in range(y.shape[1])})
return df_X, df_y
@pytest.mark.skip(reason="Test should be run only with libcuML.so")
def test_ols():
cluster = LocalCUDACluster(threads_per_worker=1)
client = Client(cluster)
import dask_cudf
import cudf
import numpy as np
from dask_cuml.linear_model import LinearRegression as cumlOLS_dask
nrows = 2**8
ncols = 399
X, y = load_data(nrows, ncols)
X_cudf = cudf.DataFrame.from_pandas(X)
y_cudf = np.array(y.as_matrix())
y_cudf = y_cudf[:, 0]
y_cudf = cudf.Series(y_cudf)
workers = client.has_what().keys()
X_df = dask_cudf.from_cudf(X_cudf, npartitions=len(workers)).persist()
y_df = dask_cudf.from_cudf(y_cudf, npartitions=len(workers)).persist()
lr = cumlOLS_dask()
lr.fit(X_df, y_df)
ret = lr.predict(X_df)
error_cuml = mean_squared_error(y, ret.compute().to_array())
assert(error_cuml < 1e-6)
| 28.883721
| 77
| 0.675523
|
1444f01e68cf4636417d471309fdf417ef5536f9
| 619
|
py
|
Python
|
fastreid/modeling/backbones/__init__.py
|
qslia/fast-reid
|
4508251d7439c6c20e8e2d9573c6123b1f388cc5
|
[
"Apache-2.0"
] | 2,194
|
2020-04-06T01:37:56.000Z
|
2022-03-30T22:17:28.000Z
|
fastreid/modeling/backbones/__init__.py
|
qslia/fast-reid
|
4508251d7439c6c20e8e2d9573c6123b1f388cc5
|
[
"Apache-2.0"
] | 542
|
2020-04-14T08:00:05.000Z
|
2022-03-29T07:39:40.000Z
|
fastreid/modeling/backbones/__init__.py
|
qslia/fast-reid
|
4508251d7439c6c20e8e2d9573c6123b1f388cc5
|
[
"Apache-2.0"
] | 667
|
2020-04-08T02:06:03.000Z
|
2022-03-29T00:57:32.000Z
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from .build import build_backbone, BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
from .osnet import build_osnet_backbone
from .resnest import build_resnest_backbone
from .resnext import build_resnext_backbone
from .regnet import build_regnet_backbone, build_effnet_backbone
from .shufflenet import build_shufflenetv2_backbone
from .mobilenet import build_mobilenetv2_backbone
from .mobilenetv3 import build_mobilenetv3_backbone
from .repvgg import build_repvgg_backbone
from .vision_transformer import build_vit_backbone
| 32.578947
| 64
| 0.861066
|
a259a361be42a7caa7f72f962a8e6025c6ed6576
| 7,168
|
py
|
Python
|
homeassistant/components/netgear_lte/__init__.py
|
jamiewalters/home-assistant
|
83be1aed38a40b12a39055ad904e42277f9d3677
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/netgear_lte/__init__.py
|
jamiewalters/home-assistant
|
83be1aed38a40b12a39055ad904e42277f9d3677
|
[
"Apache-2.0"
] | 2
|
2019-04-15T02:43:04.000Z
|
2019-04-15T02:49:10.000Z
|
homeassistant/components/netgear_lte/__init__.py
|
jamiewalters/home-assistant
|
83be1aed38a40b12a39055ad904e42277f9d3677
|
[
"Apache-2.0"
] | 1
|
2019-06-19T07:43:11.000Z
|
2019-06-19T07:43:11.000Z
|
"""Support for Netgear LTE modems."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import attr
import voluptuous as vol
from homeassistant.const import (
CONF_HOST, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_PASSWORD,
CONF_RECIPIENT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from . import sensor_types
REQUIREMENTS = ['eternalegypt==0.0.5']
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISPATCHER_NETGEAR_LTE = 'netgear_lte_update'
DOMAIN = 'netgear_lte'
DATA_KEY = 'netgear_lte'
EVENT_SMS = 'netgear_lte_sms'
SERVICE_DELETE_SMS = 'delete_sms'
ATTR_HOST = 'host'
ATTR_SMS_ID = 'sms_id'
ATTR_FROM = 'from'
ATTR_MESSAGE = 'message'
NOTIFY_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
vol.Optional(CONF_RECIPIENT, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT):
vol.All(cv.ensure_list, [vol.In(sensor_types.ALL)]),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN, default={}):
vol.All(cv.ensure_list, [NOTIFY_SCHEMA]),
vol.Optional(SENSOR_DOMAIN, default={}):
SENSOR_SCHEMA,
})])
}, extra=vol.ALLOW_EXTRA)
DELETE_SMS_SCHEMA = vol.Schema({
vol.Required(ATTR_HOST): cv.string,
vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]),
})
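# A hypothetical configuration.yaml entry accepted by CONFIG_SCHEMA (values
# illustrative; valid monitored_conditions come from sensor_types.ALL):
#
# netgear_lte:
#   - host: 192.168.5.1
#     password: secret
#     notify:
#       - name: sms
#         recipient: "+15555550123"
#     sensor:
#       monitored_conditions:
#         - usage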
@attr.s
class ModemData:
"""Class for modem state."""
hass = attr.ib()
host = attr.ib()
modem = attr.ib()
data = attr.ib(init=False, default=None)
connected = attr.ib(init=False, default=True)
async def async_update(self):
"""Call the API to update the data."""
import eternalegypt
try:
self.data = await self.modem.information()
if not self.connected:
_LOGGER.warning("Connected to %s", self.host)
self.connected = True
except eternalegypt.Error:
if self.connected:
_LOGGER.warning("Lost connection to %s", self.host)
self.connected = False
self.data = None
async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE)
@attr.s
class LTEData:
"""Shared state."""
websession = attr.ib()
modem_data = attr.ib(init=False, factory=dict)
def get_modem_data(self, config):
"""Get modem_data for the host in config."""
return self.modem_data.get(config[CONF_HOST])
async def async_setup(hass, config):
"""Set up Netgear LTE component."""
if DATA_KEY not in hass.data:
websession = async_create_clientsession(
hass, cookie_jar=aiohttp.CookieJar(unsafe=True))
hass.data[DATA_KEY] = LTEData(websession)
async def delete_sms_handler(service):
"""Apply a service."""
host = service.data[ATTR_HOST]
conf = {CONF_HOST: host}
modem_data = hass.data[DATA_KEY].get_modem_data(conf)
if not modem_data:
_LOGGER.error(
"%s: host %s unavailable", SERVICE_DELETE_SMS, host)
return
for sms_id in service.data[ATTR_SMS_ID]:
await modem_data.modem.delete_sms(sms_id)
hass.services.async_register(
DOMAIN, SERVICE_DELETE_SMS, delete_sms_handler,
schema=DELETE_SMS_SCHEMA)
netgear_lte_config = config[DOMAIN]
# Set up each modem
tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config]
await asyncio.wait(tasks)
# Load platforms for each modem
for lte_conf in netgear_lte_config:
# Notify
for notify_conf in lte_conf[NOTIFY_DOMAIN]:
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
CONF_NAME: notify_conf.get(CONF_NAME),
NOTIFY_DOMAIN: notify_conf,
}
hass.async_create_task(discovery.async_load_platform(
hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config))
# Sensor
sensor_conf = lte_conf.get(SENSOR_DOMAIN)
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
SENSOR_DOMAIN: sensor_conf,
}
hass.async_create_task(discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config))
return True
async def _setup_lte(hass, lte_config):
"""Set up a Netgear LTE modem."""
import eternalegypt
host = lte_config[CONF_HOST]
password = lte_config[CONF_PASSWORD]
websession = hass.data[DATA_KEY].websession
modem = eternalegypt.Modem(hostname=host, websession=websession)
modem_data = ModemData(hass, host, modem)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
retry_task = hass.loop.create_task(
_retry_login(hass, modem_data, password))
@callback
def cleanup_retry(event):
"""Clean up retry task resources."""
if not retry_task.done():
retry_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
"""Log in and complete setup."""
await modem_data.modem.login(password=password)
def fire_sms_event(sms):
"""Send an SMS event."""
data = {
ATTR_HOST: modem_data.host,
ATTR_SMS_ID: sms.id,
ATTR_FROM: sms.sender,
ATTR_MESSAGE: sms.message,
}
hass.bus.async_fire(EVENT_SMS, data)
await modem_data.modem.add_sms_listener(fire_sms_event)
await modem_data.async_update()
hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
async def cleanup(event):
"""Clean up resources."""
await modem_data.modem.logout()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _update(now):
"""Periodic update."""
await modem_data.async_update()
async_track_time_interval(hass, _update, SCAN_INTERVAL)
async def _retry_login(hass, modem_data, password):
"""Sleep and retry setup."""
import eternalegypt
_LOGGER.warning(
"Could not connect to %s. Will keep trying", modem_data.host)
modem_data.connected = False
delay = 15
while not modem_data.connected:
await asyncio.sleep(delay)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
delay = min(2*delay, 300)
| 29.619835
| 75
| 0.664342
|
1f94992e3c6e75ab9fe6c7457f79a0589c44a540
| 8,972
|
py
|
Python
|
genrl/agents/deep/a2c/a2c.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 390
|
2020-05-03T17:34:02.000Z
|
2022-03-05T11:29:07.000Z
|
genrl/agents/deep/a2c/a2c.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 306
|
2020-05-03T05:53:53.000Z
|
2022-03-12T00:27:28.000Z
|
genrl/agents/deep/a2c/a2c.py
|
matrig/genrl
|
25eb018f18a9a1d0865c16e5233a2a7ccddbfd78
|
[
"MIT"
] | 64
|
2020-05-05T20:23:30.000Z
|
2022-03-30T08:43:10.000Z
|
from typing import Any, Dict
import gym
import torch
import torch.optim as opt
from torch.nn import functional as F
from genrl.agents.deep.base import OnPolicyAgent
from genrl.utils import (
compute_returns_and_advantage,
get_env_properties,
get_model,
safe_mean,
)
class A2C(OnPolicyAgent):
"""Advantage Actor Critic algorithm (A2C)
The synchronous version of A3C
Paper: https://arxiv.org/abs/1602.01783
Attributes:
network (str): The network type of the Q-value function.
Supported types: ["cnn", "mlp"]
env (Environment): The environment that the agent is supposed to act on
create_model (bool): Whether the model of the algo should be created when initialised
batch_size (int): Mini batch size for loading experiences
gamma (float): The discount factor for rewards
layers (:obj:`tuple` of :obj:`int`): Layers in the Neural Network
of the Q-value function
shared_layers(:obj:`tuple` of :obj:`int`): Sizes of shared layers in Actor Critic if using
lr_policy (float): Learning rate for the policy/actor
lr_value (float): Learning rate for the critic
rollout_size (int): Capacity of the Replay Buffer
buffer_type (str): Choose the type of Buffer: ["rollout"]
noise (:obj:`ActionNoise`): Action Noise function added to aid in exploration
noise_std (float): Standard deviation of the action noise distribution
value_coeff (float): Ratio of magnitude of value updates to policy updates
entropy_coeff (float): Ratio of magnitude of entropy updates to policy updates
seed (int): Seed for randomness
render (bool): Should the env be rendered during training?
device (str): Hardware being used for training. Options:
["cuda" -> GPU, "cpu" -> CPU]
"""
def __init__(
self,
*args,
noise: Any = None,
noise_std: float = 0.1,
value_coeff: float = 0.5,
entropy_coeff: float = 0.01,
**kwargs
):
super(A2C, self).__init__(*args, **kwargs)
self.noise = noise
self.noise_std = noise_std
self.value_coeff = value_coeff
self.entropy_coeff = entropy_coeff
self.empty_logs()
if self.create_model:
self._create_model()
def _create_model(self) -> None:
"""Function to initialize Actor-Critic architecture
This will create the Actor-Critic net for the agent and initialise the action noise
"""
state_dim, action_dim, discrete, action_lim = get_env_properties(
self.env, self.network
)
if isinstance(self.network, str):
arch_type = self.network
if self.shared_layers is not None:
arch_type += "s"
self.ac = get_model("ac", arch_type)(
state_dim,
action_dim,
shared_layers=self.shared_layers,
policy_layers=self.policy_layers,
value_layers=self.value_layers,
val_type="V",
discrete=discrete,
action_lim=action_lim,
).to(self.device)
else:
self.ac = self.network.to(self.device)
if self.noise is not None:
self.noise = self.noise(
torch.zeros(action_dim), self.noise_std * torch.ones(action_dim)
)
actor_params, critic_params = self.ac.get_params()
        self.optimizer_policy = opt.Adam(actor_params, lr=self.lr_policy)
        self.optimizer_value = opt.Adam(critic_params, lr=self.lr_value)
def select_action(
self, state: torch.Tensor, deterministic: bool = False
) -> torch.Tensor:
"""Select action given state
Action Selection for On Policy Agents with Actor Critic
Args:
state (:obj:`torch.Tensor`): Current state of the environment
deterministic (bool): Should the policy be deterministic or stochastic
Returns:
action (:obj:`torch.Tensor`): Action taken by the agent
value (:obj:`torch.Tensor`): Value of given state
log_prob (:obj:`torch.Tensor`): Log probability of selected action
"""
# create distribution based on actor output
action, dist = self.ac.get_action(state, deterministic=deterministic)
value = self.ac.get_value(state)
return action.detach(), value, dist.log_prob(action).cpu()
def get_traj_loss(self, values: torch.Tensor, dones: torch.Tensor) -> None:
"""Get loss from trajectory traversed by agent during rollouts
Computes the returns and advantages needed for calculating loss
Args:
values (:obj:`torch.Tensor`): Values of states encountered during the rollout
dones (:obj:`list` of bool): Game over statuses of each environment
"""
compute_returns_and_advantage(
self.rollout, values.detach().cpu().numpy(), dones.cpu().numpy()
)
def evaluate_actions(self, states: torch.Tensor, actions: torch.Tensor):
"""Evaluates actions taken by actor
Actions taken by actor and their respective states are analysed to get
log probabilities and values from critics
Args:
states (:obj:`torch.Tensor`): States encountered in rollout
actions (:obj:`torch.Tensor`): Actions taken in response to respective states
Returns:
values (:obj:`torch.Tensor`): Values of states encountered during the rollout
log_probs (:obj:`torch.Tensor`): Log of action probabilities given a state
"""
states, actions = states.to(self.device), actions.to(self.device)
_, dist = self.ac.get_action(states, deterministic=False)
values = self.ac.get_value(states)
return values, dist.log_prob(actions).cpu(), dist.entropy().cpu()
def update_params(self) -> None:
"""Updates the the A2C network
Function to update the A2C actor-critic architecture
"""
for rollout in self.rollout.get(self.batch_size):
actions = rollout.actions
if isinstance(self.env.action_space, gym.spaces.Discrete):
actions = actions.long().flatten()
values, log_prob, entropy = self.evaluate_actions(
rollout.observations, actions
)
policy_loss = rollout.advantages * log_prob
policy_loss = -torch.mean(policy_loss)
self.logs["policy_loss"].append(policy_loss.item())
value_loss = self.value_coeff * F.mse_loss(rollout.returns, values.cpu())
self.logs["value_loss"].append(torch.mean(value_loss).item())
entropy_loss = -torch.mean(entropy) # Change this to entropy
self.logs["policy_entropy"].append(entropy_loss.item())
actor_loss = policy_loss + self.entropy_coeff * entropy_loss
self.optimizer_policy.zero_grad()
actor_loss.backward()
torch.nn.utils.clip_grad_norm_(self.ac.actor.parameters(), 0.5)
self.optimizer_policy.step()
self.optimizer_value.zero_grad()
value_loss.backward()
torch.nn.utils.clip_grad_norm_(self.ac.critic.parameters(), 0.5)
self.optimizer_value.step()
def get_hyperparams(self) -> Dict[str, Any]:
"""Get relevant hyperparameters to save
Returns:
hyperparams (:obj:`dict`): Hyperparameters to be saved
weights (:obj:`torch.Tensor`): Neural network weights
"""
hyperparams = {
"network": self.network,
"batch_size": self.batch_size,
"gamma": self.gamma,
"lr_policy": self.lr_policy,
"lr_value": self.lr_value,
"rollout_size": self.rollout_size,
}
return hyperparams, self.ac.state_dict()
def _load_weights(self, weights) -> None:
"""Load weights for the agent from pretrained model
Args:
weights (:obj:`torch.Tensor`): neural net weights
"""
self.ac.load_state_dict(weights)
def get_logging_params(self) -> Dict[str, Any]:
"""Gets relevant parameters for logging
Returns:
logs (:obj:`dict`): Logging parameters for monitoring training
"""
logs = {
"policy_loss": safe_mean(self.logs["policy_loss"]),
"value_loss": safe_mean(self.logs["value_loss"]),
"policy_entropy": safe_mean(self.logs["policy_entropy"]),
"mean_reward": safe_mean(self.rewards),
}
self.empty_logs()
return logs
def empty_logs(self):
"""Empties logs"""
self.logs = {}
self.logs["policy_loss"] = []
self.logs["value_loss"] = []
self.logs["policy_entropy"] = []
self.rewards = []
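# A minimal usage sketch, assuming genrl's VectorEnv and OnPolicyTrainer exist
# as in the project's README (illustrative only, not verified at this commit):
#   from genrl.environments import VectorEnv
#   from genrl.trainers import OnPolicyTrainer
#   env = VectorEnv("CartPole-v0")
#   agent = A2C("mlp", env)
#   OnPolicyTrainer(agent, env, log_mode=["stdout"]).train()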
| 37.383333
| 98
| 0.618034
|
93ef64d57bdefee99ddc0b9a65fbb5d93b4491a2
| 409
|
py
|
Python
|
src/code-challenges/codewars/6KYU/spinWords/spin_words.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | 1
|
2020-08-30T07:52:20.000Z
|
2020-08-30T07:52:20.000Z
|
src/code-challenges/codewars/6KYU/spinWords/spin_words.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | 6
|
2020-08-12T07:05:04.000Z
|
2021-08-23T06:10:10.000Z
|
src/code-challenges/codewars/6KYU/spinWords/spin_words.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | null | null | null |
# https://www.codewars.com/kata/5264d2b162488dc400000001/discuss/python
def spin_words(sentence):
    word_list = sentence.split()
    result = []
    for word in word_list:
        if len(word) >= 5:
            result.append(word[::-1])
        else:
            result.append(word)
    return " ".join(result)
# Alternative:
# return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])
| 25.5625
| 79
| 0.591687
|
b43673ad24de6ae908850009df8fde50c0784ee8
| 605
|
py
|
Python
|
accounts/views.py
|
vansjyo/OSVI-RemoteControl
|
6d3dd6aa1cceac2254171d57b33975df08cda2a8
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
vansjyo/OSVI-RemoteControl
|
6d3dd6aa1cceac2254171d57b33975df08cda2a8
|
[
"MIT"
] | null | null | null |
accounts/views.py
|
vansjyo/OSVI-RemoteControl
|
6d3dd6aa1cceac2254171d57b33975df08cda2a8
|
[
"MIT"
] | null | null | null |
# accounts/views.py
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
from django.contrib.auth import logout
from django.shortcuts import render
class SignUp(generic.CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
def single_user(request):
logout(request)
return render(request, 'single_user.html')
def time_up(request):
logout(request)
return render(request, 'time_up.html')
def team(request):
return render(request, 'team.html')
| 24.2
| 54
| 0.761983
|
569b61dfd0395059880f696b302b0ac634b1cc99
| 118
|
py
|
Python
|
tests/context.py
|
ctcutler/dotty
|
64d8dc47833e3a7ee73e26b1d2075c8f33b89601
|
[
"MIT"
] | null | null | null |
tests/context.py
|
ctcutler/dotty
|
64d8dc47833e3a7ee73e26b1d2075c8f33b89601
|
[
"MIT"
] | null | null | null |
tests/context.py
|
ctcutler/dotty
|
64d8dc47833e3a7ee73e26b1d2075c8f33b89601
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dotty
| 19.666667
| 82
| 0.728814
|
fd0f0f500b9fe3a7c5b74b781968d71e6026afe6
| 61,450
|
py
|
Python
|
src/signalalign/hiddenMarkovModel.py
|
kishwarshafin/signalAlign
|
c9b7b9232ef6fb76aa427670981c969b887f4860
|
[
"MIT"
] | null | null | null |
src/signalalign/hiddenMarkovModel.py
|
kishwarshafin/signalAlign
|
c9b7b9232ef6fb76aa427670981c969b887f4860
|
[
"MIT"
] | null | null | null |
src/signalalign/hiddenMarkovModel.py
|
kishwarshafin/signalAlign
|
c9b7b9232ef6fb76aa427670981c969b887f4860
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""hiddenMarkovModel.py contains objects for handling HMMs for SignalAlign"""
########################################################################
# File: hiddenMarkovModel.py
# executable: hiddenMarkovModel.py
#
# Author: Andrew Bailey
# History: 08/10/18 Created
########################################################################
from __future__ import print_function
import sys
import os
import numpy as np
import pandas as pd
import tempfile
from itertools import product
from scipy.stats import norm, invgauss, entropy
from scipy.spatial.distance import euclidean
from sklearn.neighbors import KernelDensity
from py3helpers.utils import all_string_permutations
from py3helpers.seq_tools import is_non_canonical_iupac_base
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')
else:
    mpl.use("TkAgg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Globals
NORM_DIST_PARAMS = 2
NB_MODEL_PARAMS = 5
_SQRT2 = np.sqrt(2)
def parse_assignment_file(file_path):
"""Parse the .assignments.tsv output file from signalAlign:
:param file_path: path to assignments file
    :return: pandas DataFrame with column names "kmer", "strand", "level_mean", "prob"
"""
data = pd.read_csv(file_path, delimiter="\t",
usecols=(0, 1, 2, 3),
names=["kmer", "strand", "level_mean", "prob"],
dtype={"kmer": np.str, "strand": np.str, "level_mean": np.float64, "prob": np.float64},
header=None)
return data
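# An assignments row is tab-separated, e.g. (values illustrative):
#   "ATTGCC\tt\t85.2\t0.97"  -> kmer, strand, level_mean, prob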
def read_in_alignment_file(file_path):
"""Parse the buildAlignment.tsv output file from CreateHdpTrainingData
:param file_path: path to alignment file
    :return: pandas DataFrame with the full alignment columns ('contig' through 'path_kmer')
"""
assert os.path.exists(file_path), "File path does not exist: {}".format(file_path)
data = pd.read_csv(file_path, delimiter="\t",
names=['contig', 'reference_index',
'reference_kmer', 'read_file',
'strand', 'event_index',
'event_mean', 'event_noise',
'event_duration', 'aligned_kmer',
'scaled_mean_current', 'scaled_noise',
'posterior_probability', 'descaled_event_mean',
'ont_model_mean', 'path_kmer'],
dtype={'contig': np.str, 'reference_index': np.int64,
'reference_kmer': np.str, 'read_file': np.str,
'strand': np.str, 'event_index': np.int64,
'event_mean': np.float64, 'event_noise': np.float64,
'event_duration': np.float64, 'aligned_kmer': np.str,
'scaled_mean_current': np.float64, 'scaled_noise': np.float64,
'posterior_probability': np.float64, 'descaled_event_mean': np.float64,
'ont_model_mean': np.float64, 'path_kmer': np.str},
header=None)
return data
def parse_alignment_file(file_path):
"""Parse the buildAlignment.tsv output file from CreateHdpTrainingData
:param file_path: path to alignment file
    :return: pandas DataFrame with column names "kmer", "strand", "level_mean", "prob"
"""
assert os.path.exists(file_path), "File path does not exist: {}".format(file_path)
data = pd.read_csv(file_path, delimiter="\t",
usecols=(4, 9, 13, 12),
names=["strand", "kmer", "prob", "level_mean"],
dtype={"kmer": np.str, "strand": np.str, "level_mean": np.float64, "prob": np.float64},
header=None)[["kmer", "strand", "level_mean", "prob"]]
return data
class HmmModel(object):
def __init__(self, ont_model_file, hdp_model_file=None, nanopolish_model_file=None, rna=False, name=None):
# TODO Need to create docs here
assert os.path.exists(ont_model_file)
self.name = name
self.rna = rna
self.ont_model_file = ont_model_file
self.match_model_params = 5 # level_mean, level_sd, noise_mean, noise_sd, noise_lambda
self.state_number = 3
self.transitions = np.zeros(self.state_number ** 2)
self.transitions_expectations = np.zeros(self.state_number ** 2)
self.likelihood = 0.0
self.running_likelihoods = []
self.alphabet_size = 0
self.alphabet = ""
self.kmer_length = 0
self.has_ont_model = False
self.has_nanopolish_model = False
self.normalized = False
self.sorted_kmer_tuple = tuple()
self.num_kmers = 0
# HDP stuff here
self.kmer_assignments = []
self.event_assignments = []
self.assignments_record = []
self.symbol_set_size = 0
self.linspace = np.linspace(30, 160, num=2000)
# event model for describing normal distributions for each kmer
self.event_model = {"means": np.zeros(self.symbol_set_size),
"SDs": np.zeros(self.symbol_set_size),
"noise_means": np.zeros(self.symbol_set_size),
"noise_SDs": np.zeros(self.symbol_set_size),
"noise_lambdas": np.zeros(self.symbol_set_size)}
self.set_default_transitions()
# bins for expectations
self.load_model(self.ont_model_file)
self.mean_expectations = np.zeros(self.symbol_set_size)
self.sd_expectations = np.zeros(self.symbol_set_size)
self.posteriors = np.zeros(self.symbol_set_size)
self.observed = np.zeros(self.symbol_set_size, dtype=bool)
self.has_hdp_model = False
# Load HDP model if passed
if hdp_model_file:
assert os.path.exists(hdp_model_file)
self.hdp_path = hdp_model_file
self.splines_finalized = False
self.has_data = False
self.sample_gamma = False
self.num_dps = 0
self.mu = 0
self.nu = 0
self.alpha = 0
self.beta = 0
self.grid_start = 0
self.grid_stop = 0
self.grid_length = 0
self.gamma_alpha = 0
self.gamma_beta = 0
self.w = 0
self.s = 0
self.all_posterior_pred = []
self.all_spline_slopes = []
self._initialize_hdp_model()
self.nanopolish_model_file = nanopolish_model_file
if self.nanopolish_model_file:
assert os.path.exists(self.nanopolish_model_file)
self.nanopolish_event_model = {}
self._load_nanopolish_model(self.nanopolish_model_file)
def _load_nanopolish_model(self, model_file):
"""Load HMM model from nanopolish model file
the model file has the format:
1st couple lines have # : #ont_model_name r9.4_180mv_450bps_6mer
#kit r9.4_450bps
#strand template
#k 6
#original_file r9.4_180mv_450bps_6mer/template_median68pA.model
header line: kmer level_mean level_stdv sd_mean sd_stdv weight
:param model_file: path to model file
"""
self.nanopolish_event_model, alphabet, k = load_nanopolish_model(model_file)
assert alphabet == self.alphabet, "Nanopolish model alphabet does not match signalalign model. sa {} != np {}".format(alphabet, self.alphabet)
assert k == self.kmer_length, "Nanopolish model kmer length does not match signalalign model: sa {} != np {}".format(k, self.kmer_length)
return self.nanopolish_event_model
def normalize_transitions_expectations(self):
"""Normalize transitions from each state to the other states
eg: MATCH_CONTINUE = MATCH_CONTINUE / (GAP_OPEN_Y + GAP_OPEN_X + MATCH_CONTINUE)
"""
for from_state in range(self.state_number):
i = self.state_number * from_state
j = sum(self.transitions_expectations[i:i + self.state_number])
for to_state in range(self.state_number):
self.transitions_expectations[i + to_state] = self.transitions_expectations[i + to_state] / j
def set_default_transitions(self):
MATCH_CONTINUE = np.exp(-0.23552123624314988) # stride
GAP_OPEN_X = np.exp(-1.6269694202638481) # skip
GAP_OPEN_Y = np.exp(-4.3187242127300092) # 1 - (skip + stride)
MATCH_FROM_GAP_X = np.exp(-0.21880828092192281) # 1 - skip'
GAP_EXTEND_X = np.exp(-1.6269694202638481) # skip'
GAP_SWITCH_TO_Y = 0.0
GAP_EXTEND_Y = np.exp(-4.3187242127239411) # stay (1 - (skip + stay))
MATCH_FROM_GAP_Y = np.exp(-0.013406326748077823) # 1 - (skip + stay)
GAP_SWITCH_TO_X = 0.000000001
self.transitions = [
MATCH_CONTINUE, GAP_OPEN_X, GAP_OPEN_Y,
MATCH_FROM_GAP_X, GAP_EXTEND_X, GAP_SWITCH_TO_Y,
MATCH_FROM_GAP_Y, GAP_SWITCH_TO_X, GAP_EXTEND_Y
]
return
def check_header_line(self, line, expectations_file):
"""Make sure that the header line of an expectations file matches the model we are training
:param line: split header line. eg: ['3', '4', "ACGT", '5']
:param expectations_file: path to expectations file for error reporting
:return: True if assert statements pass
"""
assert len(line) == 4, "signalHmm.check_header_line - incorrect header (param line): {}".format(
expectations_file)
assert int(line[0]) == self.state_number, "signalHmm.check_header_line - state number error should be {exp} " \
"got {obs}".format(exp=self.state_number, obs=line[0])
assert int(line[1]) == self.alphabet_size, "signalHmm.check_header_line - alphabet size error incorrect " \
"parameters: {file}, line {line}".format(file=expectations_file,
line=''.join(line))
assert line[2] == self.alphabet, "signalHmm.check_header_line - incorrect parameters: {file}, line {line}" \
"".format(file=expectations_file, line=''.join(line))
assert int(line[3]) == self.kmer_length, "signalHmm.check_header_line - incorrect parameters: {file}, " \
"line {line}".format(file=expectations_file, line=''.join(line))
return True
def load_model(self, model_file):
"""Load HMM model from model file
the model file has the format:
line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
line 1: match->match \t match->gapX \t match->gapY \t
gapX->match \t gapX->gapX \t gapX->gapY \t
gapY->match \t gapY->gapX \t gapY->gapY \n
line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
:param model_file: path to model file
"""
assert os.path.exists(model_file), "signalHmm.load_model - didn't find model here: {}".format(model_file)
with open(model_file, 'r') as fH:
line = fH.readline().split()
# check for correct header length
assert len(line) == 4, "signalHmm.load_model - incorrect line length line:{}".format(''.join(line))
# check stateNumber
assert int(
line[0]) == self.state_number, "signalHmm.load_model - incorrect stateNumber got {got} should be {exp}" \
"".format(got=int(line[0]), exp=self.state_number)
# load model parameters
self.alphabet_size = int(line[1])
self.alphabet = line[2]
self.kmer_length = int(line[3])
self.symbol_set_size = self.alphabet_size ** self.kmer_length
assert self.symbol_set_size > 0, "signalHmm.load_model - Got 0 for symbol_set_size"
assert self.symbol_set_size <= 6 ** 6, "signalHmm.load_model - Got more than 6^6 for symbol_set_size got {}" \
"".format(self.symbol_set_size)
line = list(map(float, fH.readline().split()))
assert len(line) == len(self.transitions) + 1, "signalHmm.load_model incorrect transitions line"
self.transitions = line[:-1]
self.likelihood = line[-1]
line = list(map(float, fH.readline().split()))
assert len(line) == self.symbol_set_size * NB_MODEL_PARAMS, \
"signalHmm.load_model incorrect event model line"
self.event_model["means"] = line[::NB_MODEL_PARAMS]
self.event_model["SDs"] = line[1::NB_MODEL_PARAMS]
self.event_model["noise_means"] = line[2::NB_MODEL_PARAMS]
self.event_model["noise_SDs"] = line[3::NB_MODEL_PARAMS]
self.event_model["noise_lambdas"] = line[4::NB_MODEL_PARAMS]
assert not np.any(self.event_model["means"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(self.event_model["SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(
self.event_model["noise_means"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_means"
assert not np.any(
self.event_model["noise_SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_SDs"
self._create_kmer_index_map()
self.has_ont_model = True
def write(self, out_file):
"""Write out model file to out_file path
:param out_file: path to write hmm model file
"""
# the model file has the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
assert self.has_ont_model, "Shouldn't be writing down a Hmm that has no Model"
assert self.normalized, "Shouldn't be writing down a not normalized HMM"
with open(out_file, 'w') as f:
# line 0
f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
"".format(stateNumber=self.state_number, alphabetSize=self.alphabet_size,
alphabet=self.alphabet, kmerLength=self.kmer_length))
# line 1 transitions
for i in range(self.state_number * self.state_number):
f.write("{transition}\t".format(transition=str(self.transitions[i])))
# likelihood
f.write("{}\n".format(str(self.likelihood)))
# line 2 Event Model
for k in range(self.symbol_set_size):
f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
"".format(level_mean=self.event_model["means"][k], level_sd=self.event_model["SDs"][k],
noise_mean=self.event_model["noise_means"][k],
noise_sd=self.event_model["noise_SDs"][k],
noise_lambda=self.event_model["noise_lambdas"][k]))
f.write("\n")
@staticmethod
def _get_kmer_index(kmer, alphabet, kmer_length, alphabet_size):
"""Get the model index for a given kmer
ex: get_kmer_index(AAAAA) = 0
:param kmer: nucleotide sequence
"""
        assert set(kmer).issubset(set(alphabet)), "Nucleotide not found in model alphabet: kmer={}, " \
"alphabet={}".format(kmer, alphabet)
assert len(kmer) == kmer_length, "Kmer ({}) length does not match model kmer length: {}".format(kmer,
kmer_length)
alphabet_dict = {base: index for index, base in enumerate(sorted(alphabet))}
kmer_index = 0
for index, nuc in enumerate(kmer):
kmer_index += alphabet_dict[nuc] * (alphabet_size ** (kmer_length - index - 1))
return kmer_index
def get_kmer_index(self, kmer):
"""Get the model index for a given kmer
ex: get_kmer_index(AAAAA) = 0
:param kmer: nucleotide sequence
"""
return self._get_kmer_index(kmer, self.alphabet, self.kmer_length, self.alphabet_size)
def _create_kmer_index_map(self):
"""Create the kmer_to_index_map and index_to_kmer_map"""
sorted_kmer_list = []
for i, kmer_list in enumerate(product(self.alphabet, repeat=self.kmer_length)):
sorted_kmer_list.append(''.join(kmer_list))
self.sorted_kmer_tuple = tuple(sorted_kmer_list)
self.num_kmers = len(self.sorted_kmer_tuple)
return self.sorted_kmer_tuple
def index_to_kmer(self, index):
"""Get kmer from a given index
ex: index_to_kmer(0) = "AAAAA"
:param index: number representing kmer
"""
assert index < self.num_kmers, \
"The kmer index is out of bounds given the alphabet and kmer length. {} > {}".format(index, self.num_kmers)
return self.sorted_kmer_tuple[index]
def get_event_mean_gaussian_parameters(self, kmer, nanopolish=False):
"""Get the model's Normal distribution parameters to model the mean of a specific kmer
:param kmer: kmer that can fit in model
"""
kmer_index = self.get_kmer_index(kmer)
if nanopolish:
normal_mean = self.nanopolish_event_model["means"][kmer_index]
normal_sd = self.nanopolish_event_model["SDs"][kmer_index]
else:
normal_mean = self.event_model["means"][kmer_index]
normal_sd = self.event_model["SDs"][kmer_index]
return normal_mean, normal_sd
def get_event_sd_inv_gaussian_parameters(self, kmer, nanopolish=False):
"""Get the model's inverse gaussian distribution parameters to model the mean of a specific kmer
:param kmer: kmer that can fit in model
"""
kmer_index = self.get_kmer_index(kmer)
inv_gauss_mean = self.event_model["noise_means"][kmer_index]
inv_gauss_lambda = self.event_model["noise_lambdas"][kmer_index]
return inv_gauss_mean, inv_gauss_lambda
def log_event_mean_gaussian_probability_match(self, event_mean, kmer, nanopolish=False):
"""Get the probability of the event_mean coming from the model's kmer gaussian/normal distribution
:param event_mean: mean of event
:param kmer: nucleotide sequence to check
"""
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer, nanopolish=nanopolish)
return norm.logpdf(event_mean, normal_mean, normal_sd)
def log_event_sd_inv_gaussian_probability_match(self, event_sd, kmer):
"""Get the probability of the event_sd coming from the model's kmer inv-gaussian distribution
:param event_sd: sd of event
:param kmer: kmer for model distribution selection
"""
inv_gauss_mean, inv_gauss_lambda = self.get_event_sd_inv_gaussian_parameters(kmer)
return invgauss(inv_gauss_mean / inv_gauss_lambda, scale=inv_gauss_lambda).logpdf(event_sd)
def add_expectations_file(self, expectations_file):
"""Add expectations file to the HMM. This is used for generating expectations of transition probabilities or
emission probabilities
expectations files have the format:
line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
line 1: match->match \t match->gapX \t match->gapY \t
gapX->match \t gapX->gapX \t gapX->gapY \t
gapY->match \t gapY->gapX \t gapY->gapY \n
line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
line 3: event expectations [mean] [sd] / kmer \n
line 4: posteriors 1 per kmer \n
line 5: observed 1 per kmer \n
:param expectations_file: path to signalAlign expectations file
:return: True if expectations file was in correct format
"""
if not os.path.exists(expectations_file) or os.stat(expectations_file).st_size == 0:
print("Empty or missing file {}".format(expectations_file), file=sys.stderr)
return False
with open(expectations_file, 'r') as fH:
# line 0
line = fH.readline().split()
self.check_header_line(line=line, expectations_file=expectations_file)
# line 1: transitions, likelihood
# check if valid
line = list(map(float, fH.readline().split()))
assert len(line) == (len(self.transitions) + 1), \
"HMM.add_expectations_file - problem with file {f} " \
"transitions line {l}, incorrect length".format(f=expectations_file, l=''.join(line))
self.likelihood += line[-1]
self.transitions_expectations = [sum(x) for x in zip(self.transitions_expectations, line[0:-1])]
# line 2: event model
line = list(map(float, fH.readline().split()))
assert len(line) == self.symbol_set_size * NB_MODEL_PARAMS, "HMM.add_expectations_file - problem with " \
"event model in file {ef}".format(ef=expectations_file)
# line 3 event expectations [E_mean, E_sd]
line = list(map(float, fH.readline().split()))
assert len(line) == self.symbol_set_size * NORM_DIST_PARAMS, \
'HMM: check_file - bad file (event expectations): {}'.format(expectations_file)
self.event_assignments += line
self.mean_expectations = [i + j for i, j in zip(self.mean_expectations, line[::NORM_DIST_PARAMS])]
self.sd_expectations = [i + j for i, j in zip(self.sd_expectations, line[1::NORM_DIST_PARAMS])]
# line 4, posteriors
line = list(map(float, fH.readline().split()))
assert len(line) == self.symbol_set_size, "HMM: check_file - bad file (posteriors): {}".format(expectations_file)
self.kmer_assignments += line
            self.posteriors = [sum(x) for x in zip(self.posteriors, line)]
            # line 5, observed kmers; parse via int() since bool("0") is True for strings
            line = [bool(int(x)) for x in fH.readline().split()]
assert len(line) == self.symbol_set_size, "HMM: check_file - bad file (observations): {}".format(expectations_file)
self.observed = [any(b) for b in zip(self.observed, line)]
return True
def normalize(self, update_transitions, update_emissions):
"""Normalize the transitions and emission probabilities
:param update_transitions: boolean option to update transitions
:param update_emissions: boolean option to update emissions
"""
# update
if update_transitions is True:
# normalize transitions expectations
self.normalize_transitions_expectations()
for i in range(self.state_number ** 2):
self.transitions[i] = self.transitions_expectations[i]
# calculate the new expected mean and standard deviation for the kmer normal distributions
if update_emissions:
# print(self.observed)
for k in range(self.symbol_set_size):
# print(k)
if self.observed[k] is True:
u_k = self.mean_expectations[k] / self.posteriors[k]
o_k = np.sqrt(self.sd_expectations[k] / self.posteriors[k])
if u_k > 0:
self.event_model["means"][k] = u_k
self.event_model["SDs"][k] = o_k
else:
continue
self.normalized = True
def reset_assignments(self):
"""Keep track of number of event assignments processed and reset event and kmer assignments"""
self.assignments_record.append(len(self.event_assignments))
self.event_assignments = []
self.kmer_assignments = []
def add_and_normalize_expectations(self, files, hmm_file, update_transitions=True, update_emissions=False):
"""Add expectations file to HMM model and update transitions. Emissions are currently unable to be updated
:param files: list of 'expectation' files to add to model
:param hmm_file: path to HMM file to write new model
:param update_transitions: boolean option to update transitions
:param update_emissions: boolean option to update emissions
"""
if update_emissions is False and update_transitions is False:
print("[trainModels] NOTICE: Training transitions by default\n", file=sys.stderr)
update_transitions = True
# reset model likelihood and keep track of passing and failing files
self.likelihood = 0
files_added_successfully = 0
files_with_problems = 0
for f in files:
try:
success = self.add_expectations_file(f)
if success:
files_added_successfully += 1
os.remove(f)
else:
files_with_problems += 1
except Exception as e:
files_with_problems += 1
print("Problem adding expectations file {file} got error {e}".format(file=f, e=e),
file=sys.stderr)
# normalize, write and keep track of running likelihood
self.normalize(update_transitions=update_transitions, update_emissions=update_emissions)
self.write(hmm_file)
self.running_likelihoods.append(self.likelihood)
self.reset_assignments()
print("[trainModels] NOTICE: Added {success} expectations files successfully, {problem} files had problems\n"
"".format(success=files_added_successfully, problem=files_with_problems), file=sys.stderr)
def _initialize_hdp_model(self):
"""Read in HDP model and make sure parameters match the ONT model"""
with open(self.hdp_path, 'r') as hdp_fh:
hdp_alphabet_size = int(hdp_fh.readline())
assert self.alphabet_size == hdp_alphabet_size, \
"ONT Alphabet size does not match HDP model ({} != {})".format(self.alphabet_size, hdp_alphabet_size)
hdp_alphabet = hdp_fh.readline().rstrip()
            assert self.alphabet == hdp_alphabet, \
                "ONT alphabet does not match HDP model ({} != {})".format(self.alphabet, hdp_alphabet)
            hdp_kmer_length = int(hdp_fh.readline())
            assert self.kmer_length == hdp_kmer_length, \
                "ONT kmer length does not match HDP model ({} != {})".format(self.kmer_length, hdp_kmer_length)
self.splines_finalized = bool(int(hdp_fh.readline()))
self.has_data = bool(int(hdp_fh.readline()))
self.sample_gamma = bool(int(hdp_fh.readline()))
self.num_dps = int(hdp_fh.readline())
self.data = [float(x) for x in hdp_fh.readline().split()]
self.dp_ids = [int(x) for x in hdp_fh.readline().split()]
unpack_line = hdp_fh.readline().split()
self.mu = float(unpack_line[0])
self.nu = float(unpack_line[1])
self.alpha = float(unpack_line[2])
self.beta = float(unpack_line[3])
unpack_line = hdp_fh.readline().split()
self.grid_start = int(unpack_line[0])
self.grid_stop = int(unpack_line[1])
self.grid_length = int(unpack_line[2])
self.linspace = np.linspace(self.grid_start, self.grid_stop, num=self.grid_length)
self.gamma_params = [float(x) for x in hdp_fh.readline().split()]
if self.sample_gamma:
self.gamma_alpha = [float(x) for x in hdp_fh.readline().split()]
self.gamma_beta = [float(x) for x in hdp_fh.readline().split()]
self.w = [float(x) for x in hdp_fh.readline().split()]
self.s = [bool(int(x)) for x in hdp_fh.readline().split()]
for i in range(self.num_dps):
line = hdp_fh.readline().split()
parent_id = line[0]
num_factor_children = line[1]
if parent_id == '-':
pass
# print(num_factor_children)
for _ in range(self.num_dps):
post_pred = [float(x) for x in hdp_fh.readline().split()]
self.all_posterior_pred.append(post_pred)
for _ in range(self.num_dps):
spline_slopes = [float(x) for x in hdp_fh.readline().split()]
self.all_spline_slopes.append(spline_slopes)
line = hdp_fh.readline()
factor_list = []
while line:
items = line.split()
if int(items[0]) == 0:
fctr = "SOMETHING"
param_array = items[2].split(';')
if int(items[0]) == 1:
# new_middle_factor
fctr = items[2]
if int(items[0]) == 2:
# new_data_pt_factor
fctr = items[2]
factor_list.append(fctr)
if items[1] != '-':
pass
line = hdp_fh.readline()
self.has_hdp_model = True
@staticmethod
def grid_spline_interp(query_x, x, y, slope, length):
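        # evaluates a cubic Hermite spline on a uniform grid (knots x, values y,
        # knot slopes `slope`), with linear extrapolation outside [x[0], x[length - 1]]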
# if event mean is below start of grid
if query_x <= x[0]:
return y[0] - slope[0] * (x[0] - query_x)
# if event mean is above end grid
elif query_x >= x[length - 1]:
n = length - 1
return y[n] + slope[n] * (query_x - x[n])
else:
dx = x[1] - x[0]
idx_left = int((query_x - x[0]) // dx)
idx_right = idx_left + 1
dy = y[idx_right] - y[idx_left]
a = slope[idx_left] * dx - dy
b = dy - slope[idx_right] * dx
t_left = (query_x - x[idx_left]) / dx
t_right = 1.0 - t_left
return t_right * y[idx_left] + t_left * y[idx_right] + t_left * t_right * (a * t_right + b * t_left)
def plot_kmer_distribution(self, kmer, alignment_file=None, alignment_file_data=None, savefig_dir=None, name=""):
"""Plot the distribution of a kmer with ONT and/or HDP distributions
:param kmer: kmer to plot
:param alignment_file: path to alignment file if you want to plot alignment data as well
:param alignment_file_data: use alignment data if it has already been loaded in
:param savefig_dir: path to plot save directory
:param name: prefix for plots
"""
assert self.has_ont_model, "Must have ONT model loaded"
if savefig_dir:
assert os.path.exists(savefig_dir), "Save figure directory does not exist: {}".format(savefig_dir)
# keep track of handles and text depending on which models are loaded
handles1 = []
legend_text1 = []
handles2 = []
legend_text2 = []
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer)
fig = plt.figure(figsize=(12, 8))
panel1 = plt.axes([0.1, 0.1, .6, .8])
panel1.set_xlabel('pA')
panel1.set_ylabel('Density')
panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)
panel1.xaxis.set_major_locator(ticker.AutoLocator())
panel1.xaxis.set_minor_locator(ticker.AutoMinorLocator())
min_x = normal_mean - (5 * normal_sd)
max_x = normal_mean + (5 * normal_sd)
panel1.set_xlim(min_x, max_x)
panel1.set_title(label=kmer)
# plot ont normal distribution
x = np.linspace(normal_mean - 4 * normal_sd, normal_mean + 4 * normal_sd, 200)
ont_handle, = panel1.plot(x, norm.pdf(x, normal_mean, normal_sd))
# panel1.plot([normal_mean, normal_mean], [0, norm.pdf(normal_mean, normal_mean, normal_sd)], lw=2)
ont_model_name = os.path.basename(self.ont_model_file)
txt_handle1, = panel1.plot([], [], ' ')
txt_handle2, = panel1.plot([], [], ' ')
txt_handle3, = panel1.plot([], [], ' ')
handles1.append(ont_handle)
legend_text1.append("ONT Normal Distribution")
handles2.extend([txt_handle1, txt_handle2, txt_handle3])
legend_text2.extend(["ONT Model: \n {}".format(ont_model_name), "ONT Event Mean: {}".format(normal_mean),
"ONT Event SD: {}".format(normal_sd)])
if self.has_hdp_model:
# plot HDP predicted distribution
kmer_id = self.get_kmer_index(kmer)
x = self.linspace
panel1.set_xlim(min(x), max(x))
hdp_y = self.all_posterior_pred[kmer_id]
hdp_handle, = panel1.plot(x, hdp_y, '-')
# compute entropy and hellinger distance
ont_normal_dist = norm.pdf(self.linspace, normal_mean, normal_sd)
kl_distance = entropy(pk=hdp_y, qk=ont_normal_dist, base=2)
h_distance = hellinger2(p=hdp_y, q=ont_normal_dist)
# deal with some extra text
txt_handle4, = panel1.plot([], [], ' ')
txt_handle5, = panel1.plot([], [], ' ')
txt_handle6, = panel1.plot([], [], ' ')
hdp_model_name = os.path.basename(self.hdp_path)
handles1.append(hdp_handle)
legend_text1.append("HDP Distribution")
handles2.extend([txt_handle4, txt_handle5, txt_handle6])
legend_text2.extend(["HDP Model: \n {}".format(hdp_model_name),
"Kullback–Leibler divergence: {}".format(np.round(kl_distance, 4)),
"Hellinger distance: {}".format(np.round(h_distance, 4))])
if alignment_file is not None or alignment_file_data is not None:
# option to parse file or not
if alignment_file is not None:
data = parse_assignment_file(alignment_file)
else:
data = alignment_file_data
kmer_assignments = data.loc[data['kmer'] == kmer]
kmer_data = kmer_assignments["level_mean"]
# get event means and linspace in correct format
x = np.asarray(kmer_data).reshape(len(kmer_data), 1)
x_plot = self.linspace[:, np.newaxis]
# get estimate for data
if len(kmer_data) > 0:
kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(x)
# estimate across the linspace
log_dens = kde.score_samples(x_plot)
kde_handle, = panel1.plot(x_plot[:, 0], np.exp(log_dens), '-')
raw_data_handle, = panel1.plot(x[:, 0], -0.005 - 0.01 * np.random.random(x.shape[0]), '+k')
# add to legend
handles1.extend([kde_handle, raw_data_handle])
legend_text1.extend(["Gaussian KDE Estimate", "Event Means: {} points".format(len(kmer_data))])
txt_handle7, = panel1.plot([], [], ' ')
if alignment_file:
alignment_file_name = os.path.basename(alignment_file)
handles2.append(txt_handle7)
legend_text2.append("RAW event data file: \n {}".format(alignment_file_name))
else:
print("{} not found in alignment file".format(kmer))
# create legend
first_legend = panel1.legend(handles1, legend_text1, fancybox=True, shadow=True,
loc='lower left', bbox_to_anchor=(1, .8))
ax = plt.gca().add_artist(first_legend)
panel1.legend(handles2, legend_text2, loc='upper left', bbox_to_anchor=(1, 0.2))
# option to save figure or just show it
if savefig_dir:
base_name = "DNA_"
if self.rna:
base_name = "RNA_"
out_name = "{}_{}_{}.png".format(name, base_name, kmer)
out_path = os.path.join(savefig_dir, out_name)
plt.savefig(out_path)
else:
plt.show()
plt.close(fig)
def get_kl_divergence(self, kmer, nanopolish=False):
"""Get Kullback–Leibler divergence between the HDP and ONT models for a specific kmer"""
kmer_id = self.get_kmer_index(kmer)
hdp_y = self.all_posterior_pred[kmer_id]
if len(hdp_y) == 0:
# print("[Kullback–Leibler divergence] No HDP data for {}".format(kmer))
return None
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer, nanopolish=nanopolish)
ont_normal_dist = norm.pdf(self.linspace, normal_mean, normal_sd)
kl_divergence = entropy(pk=hdp_y, qk=ont_normal_dist, base=2)
if kl_divergence == np.inf:
# print("[Kullback–Leibler divergence] Zero probability for {}".format(kmer))
return None
return kl_divergence
def get_hellinger_distance(self, kmer, nanopolish=False):
"""Get Hellinger distance between the HDP and ONT models for a specific kmer"""
kmer_id = self.get_kmer_index(kmer)
hdp_y = self.all_posterior_pred[kmer_id]
if len(hdp_y) == 0:
# print("[Hellinger Distance] No HDP data for {}".format(kmer))
return None
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer, nanopolish=nanopolish)
ont_normal_dist = norm.pdf(self.linspace, normal_mean, normal_sd)
h_distance = hellinger2(p=hdp_y, q=ont_normal_dist)
return h_distance
def get_median_delta(self, kmer, nanopolish=False):
"""Calculate the difference between the max value of HDP and ONT kmer distributions"""
kmer_id = self.get_kmer_index(kmer)
hdp_y = self.all_posterior_pred[kmer_id]
if len(hdp_y) == 0:
# print("[Median Delta] No HDP data for {}".format(kmer))
return None
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer, nanopolish=nanopolish)
delta = self.linspace[hdp_y.index(max(hdp_y))] - normal_mean
return abs(delta)
    def compare_distributions(self):
        """Calculate the Hellinger distance, KL divergence, and median delta between the HDP and ONT models for each kmer"""
hellinger_distances = []
kl_divergences = []
median_deltas = []
for kmer in self.sorted_kmer_tuple:
# if statements used if the HDP model does not have information on the kmer distribution
h_dist = self.get_hellinger_distance(kmer)
if h_dist:
hellinger_distances.append(h_dist)
kl_divergence = self.get_kl_divergence(kmer)
if kl_divergence:
kl_divergences.append(kl_divergence)
# print(kmer, kl_divergence)
median_delta = self.get_median_delta(kmer)
if median_delta:
if len(median_deltas) > 0 and median_delta > max(median_deltas):
pass
# print(kmer, median_delta)
median_deltas.append(median_delta)
return hellinger_distances, kl_divergences, median_deltas
def write_new_model(self, out_path, alphabet, replacement_base):
"""Write a correctly formatted new model file with a new alphabet.
:param out_path: path to output hmm
:param alphabet: new alphabet
        :param replacement_base: base to be replaced by the new character
note: will retain same kmer size and assumes only one new character
"""
# the model file has the format:
# line 0: stateNumber \t alphabetSize \t alphabet \t kmerLength
# line 1: match->match \t match->gapX \t match->gapY \t
# gapX->match \t gapX->gapX \t gapX->gapY \t
# gapY->match \t gapY->gapX \t gapY->gapY \n
# line 2: [level_mean] [level_sd] [noise_mean] [noise_sd] [noise_lambda ](.../kmer) \n
assert self.has_ont_model, "Shouldn't be writing down a Hmm that has no Model"
if not self.normalized:
self.normalize_transitions_expectations()
alphabet = "".join(sorted(alphabet.upper()))
for base in alphabet:
assert not is_non_canonical_iupac_base(base), \
"You cannot use IUPAC character to represent multiple bases. {}".format(base)
replacement_base = replacement_base.upper()
new_base = (set(alphabet) - set(self.alphabet)).pop()
alphabet_size = len(alphabet)
new_kmers = all_string_permutations(alphabet, length=self.kmer_length)
with open(out_path, 'w') as f:
# line 0
f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
"".format(stateNumber=self.state_number, alphabetSize=alphabet_size,
alphabet=alphabet, kmerLength=self.kmer_length))
# line 1 transitions
for i in range(self.state_number * self.state_number):
f.write("{transition}\t".format(transition=str(self.transitions[i])))
# likelihood
f.write("{}\n".format(str(self.likelihood)))
# line 2 Event Model
for kmer in new_kmers:
generic_kmer = kmer.replace(new_base, replacement_base)
k = self.get_kmer_index(generic_kmer)
f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
"".format(level_mean=self.event_model["means"][k], level_sd=self.event_model["SDs"][k],
noise_mean=self.event_model["noise_means"][k],
noise_sd=self.event_model["noise_SDs"][k],
noise_lambda=self.event_model["noise_lambdas"][k]))
f.write("\n")
def set_kmer_event_mean(self, kmer, event_mean):
"""Set ont event mean for a given kmer
:param kmer: valid K-mer
:param event_mean: value to set as new mean
"""
k = self.get_kmer_index(kmer)
self.event_model["means"][k] = event_mean
def set_kmer_event_sd(self, kmer, event_sd):
"""Set ont event sd for a given kmer
:param kmer: valid K-mer
:param event_sd: value to set as new kmer mean sd
"""
k = self.get_kmer_index(kmer)
self.event_model["SDs"][k] = event_sd
def set_kmer_noise_means(self, kmer, noise_means):
"""Set ont noise mean for a given kmer
:param kmer: valid K-mer
:param noise_means: value to set as new kmer noise_means
"""
k = self.get_kmer_index(kmer)
self.event_model["noise_means"][k] = noise_means
def set_kmer_noise_SDs(self, kmer, noise_SDs):
"""Set ont noise sd for a given kmer
:param kmer: valid K-mer
:param noise_SDs: value to set as new kmer noise_SDs
"""
k = self.get_kmer_index(kmer)
self.event_model["noise_SDs"][k] = noise_SDs
def set_kmer_noise_lambdas(self, kmer, noise_lambdas):
"""Set ont noise lambda for a given kmer
:param kmer: valid K-mer
:param noise_lambdas: value to set as new kmer noise_lambdas
"""
k = self.get_kmer_index(kmer)
self.event_model["noise_lambdas"][k] = noise_lambdas
def plot_kmer_distributions(self, kmer_list, alignment_file=None, alignment_file_data=None, savefig_dir=None,
name=""):
"""Plot multiple kmer distribution onto a single plot with ONT and/or HDP distributions
:param kmer_list: list of kmers to plot
:param alignment_file: path to alignment file if you want to plot alignment data as well
:param alignment_file_data: use alignment data if it has already been loaded in
:param savefig_dir: path to plot save directory
:param name: prefix for file output
"""
assert self.has_ont_model, "Must have ONT model loaded"
if savefig_dir:
assert os.path.exists(savefig_dir), "Save figure directory does not exist: {}".format(savefig_dir)
# keep track of handles and text depending on which models are loaded
handles1 = []
legend_text1 = []
handles2 = []
legend_text2 = []
fig = plt.figure(figsize=(12, 8))
panel1 = plt.axes([0.1, 0.1, .6, .8])
panel1.set_xlabel('pA')
panel1.set_ylabel('Density')
panel1.grid(color='black', linestyle='-', linewidth=1, alpha=0.5)
panel1.xaxis.set_major_locator(ticker.AutoLocator())
panel1.xaxis.set_minor_locator(ticker.AutoMinorLocator())
min_x = 1000
max_x = 0
for kmer in kmer_list:
normal_mean, normal_sd = self.get_event_mean_gaussian_parameters(kmer)
tmp_min_x = normal_mean - (5 * normal_sd)
tmp_max_x = normal_mean + (5 * normal_sd)
if min_x > tmp_min_x:
min_x = tmp_min_x
if max_x < tmp_max_x:
max_x = tmp_max_x
# plot ont normal distribution
x = np.linspace(normal_mean - 4 * normal_sd, normal_mean + 4 * normal_sd, 200)
ont_handle, = panel1.plot(x, norm.pdf(x, normal_mean, normal_sd), label=kmer)
# panel1.plot([normal_mean, normal_mean], [0, norm.pdf(normal_mean, normal_mean, normal_sd)], lw=2)
ont_model_name = os.path.basename(self.ont_model_file)
txt_handle1, = panel1.plot([], [], ' ')
txt_handle2, = panel1.plot([], [], ' ')
txt_handle3, = panel1.plot([], [], ' ')
handles1.append(ont_handle)
legend_text1.append("{} ONT Normal".format(kmer))
handles2.extend([txt_handle1, txt_handle2, txt_handle3])
legend_text2.extend(["{} ONT Model: \n {}".format(kmer, ont_model_name),
"{} ONT Event Mean: {}".format(kmer, normal_mean),
"{} ONT Event SD: {}".format(kmer, normal_sd)])
if self.has_hdp_model:
# plot HDP predicted distribution
kmer_id = self.get_kmer_index(kmer)
x = self.linspace
panel1.set_xlim(min(x), max(x))
hdp_y = self.all_posterior_pred[kmer_id]
if len(hdp_y) == len(x):
hdp_handle, = panel1.plot(x, hdp_y, '-')
handles1.append(hdp_handle)
legend_text1.append("{} HDP Distribution".format(kmer))
# # compute entropy and hellinger distance
# ont_normal_dist = norm.pdf(self.linspace, normal_mean, normal_sd)
# kl_distance = entropy(pk=hdp_y, qk=ont_normal_dist, base=2)
# h_distance = hellinger2(p=hdp_y, q=ont_normal_dist)
#
# hdp_model_name = os.path.basename(self.hdp_path)
# # deal with some extra text
# txt_handle4, = panel1.plot([], [], ' ')
# txt_handle5, = panel1.plot([], [], ' ')
# txt_handle6, = panel1.plot([], [], ' ')
#
# handles2.extend([txt_handle4, txt_handle5, txt_handle6])
# legend_text2.extend(["HDP Model: \n {}".format(hdp_model_name),
# "Kullback–Leibler divergence: {}".format(np.round(kl_distance, 4)),
# "Hellinger distance: {}".format(np.round(h_distance, 4))])
if alignment_file is not None or alignment_file_data is not None:
# option to parse file or not
if alignment_file is not None:
data = parse_assignment_file(alignment_file)
else:
data = alignment_file_data
kmer_assignments = data.loc[data['kmer'] == kmer]
kmer_data = kmer_assignments["level_mean"]
# get event means and linspace in correct format
x = np.asarray(kmer_data).reshape(len(kmer_data), 1)
x_plot = self.linspace[:, np.newaxis]
# get estimate for data
if len(kmer_data) > 0:
kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(x)
# estimate across the linspace
log_dens = kde.score_samples(x_plot)
kde_handle, = panel1.plot(x_plot[:, 0], np.exp(log_dens), '-')
raw_data_handle, = panel1.plot(x[:, 0], -0.005 - 0.01 * np.random.random(x.shape[0]), '+k')
# add to legend
handles1.extend([kde_handle, raw_data_handle])
legend_text1.extend(["Gaussian KDE Estimate", "Event Means: {} points".format(len(kmer_data))])
txt_handle7, = panel1.plot([], [], ' ')
if alignment_file:
alignment_file_name = os.path.basename(alignment_file)
handles2.append(txt_handle7)
legend_text2.append("RAW event data file: \n {}".format(alignment_file_name))
else:
print("{} not found in alignment file".format(kmer))
# create legend
first_legend = panel1.legend(handles1, legend_text1, fancybox=True, shadow=True,
loc='lower left', bbox_to_anchor=(1, .8))
ax = plt.gca().add_artist(first_legend)
panel1.legend(handles2, legend_text2, loc='upper left', bbox_to_anchor=(1, 0.2))
panel1.set_xlim(min_x, max_x)
panel1.set_title("Kmer distribution comparisons")
# option to save figure or just show it
if savefig_dir:
base_name = "DNA_comparison"
if self.rna:
base_name = "RNA_comparison"
out_name = "{}_{}_{}.png".format(name, base_name, "_".join(kmer_list))
out_path = os.path.join(savefig_dir, out_name)
plt.savefig(out_path)
else:
plt.show()
plt.close(fig)
def get_hdp_probability(self, kmer, event_mean):
"""Get the probability that an event mean came from hdp distribution for the given kmer
:param kmer: kmer that must be in model
:param event_mean: event mean to compare
:return: probability that the event mean came from the hdp distribution
"""
assert self.has_hdp_model, "HmmModel does not have HDP model. Must have hdp model to get probability"
kmer_id = self.get_kmer_index(kmer)
y = self.all_posterior_pred[kmer_id]
if len(y) == 0:
return None
slope = self.all_spline_slopes[kmer_id]
prob = self.grid_spline_interp(event_mean, self.linspace, y, slope, self.grid_length)
return prob
def get_new_linspace_hdp_probability_distribution(self, kmer, linspace):
"""Get newly descretized distribution given a new linspace
:param kmer: kmer from model
:param linspace: array of evenly spaced floats to get probability at each point
"""
new_linspace_probs = []
kmer_id = self.get_kmer_index(kmer)
y = self.all_posterior_pred[kmer_id]
if len(y) == 0:
return None
slope = self.all_spline_slopes[kmer_id]
for x in linspace:
new_linspace_probs.append(self.grid_spline_interp(x, self.linspace, y, slope, self.grid_length))
return new_linspace_probs
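# Minimal usage sketch for the HmmModel class above (the file path and kmer
# are hypothetical):
#   model = HmmModel("template.model")
#   mean, sd = model.get_event_mean_gaussian_parameters("ATGCAT")
#   prob = model.get_hdp_probability("ATGCAT", 91.3)  # needs an HDP model loaded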
def hellinger2(p, q):
    """Hellinger distance between two discrete distributions p and q."""
    return euclidean(np.sqrt(p), np.sqrt(q)) / _SQRT2
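# Sanity check for hellinger2 (a minimal sketch; p and q are assumed to be
# discrete distributions over the same support): identical distributions give
# 0, disjoint point masses give the maximum of 1.
#   hellinger2([0.5, 0.5], [0.5, 0.5])  # -> 0.0
#   hellinger2([1.0, 0.0], [0.0, 1.0])  # -> 1.0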
def create_new_model(model_path, new_model_path, find_replace_set):
"""Write a correctly formatted new model file with a new alphabet.
:param model_path: path to original HMM model
:param new_model_path: path to new model
:param find_replace_set: set of tuples with first index as find character and second is the replace character
:returns HMM model with new alphabet
note: will retain same kmer size and can handle multiple new nucleotides as long as they are not IUPAC bases
"""
model_h = HmmModel(model_path)
n_new_bases = len(find_replace_set)
counter = 1
base_name = os.path.basename(new_model_path)
with tempfile.TemporaryDirectory() as tempdir:
for old, new in find_replace_set:
if counter == n_new_bases:
new_file_name = new_model_path
else:
new_file_name = os.path.join(tempdir, str(counter) + base_name)
model_h.write_new_model(new_file_name, alphabet=model_h.alphabet + new, replacement_base=old)
model_h = HmmModel(new_file_name)
counter += 1
return model_h
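# Example: deriving a model whose alphabet gains "M" as a variant of "C"
# (file paths are hypothetical):
#   new_model = create_new_model("template.model", "template_M.model",
#                                {("C", "M")})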
def gaussian_param_to_inv_gaussian_param(mu, sigma):
"""Take the gaussian parameters for mu and sigma and convert into inverse gaussian parameters
:param mu: mean
:param sigma: standard deviation
:return: mu, lambda
"""
return mu, ((mu**3) / (sigma**2))
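# The conversion above matches the first two moments: an inverse Gaussian
# IG(mu, lam) has mean mu and variance mu**3 / lam, so lam = mu**3 / sigma**2
# recovers the Gaussian variance. A minimal check of that identity (assumes
# scipy is available; the helper below is illustrative and never called):
def _demo_inv_gaussian_moment_match(mu=90.0, sigma=2.0):
    """Verify the moment match, using scipy's parameterization in which
    IG(mu, lam) corresponds to invgauss(mu / lam, scale=lam)."""
    from scipy.stats import invgauss
    _, lam = gaussian_param_to_inv_gaussian_param(mu, sigma)
    dist = invgauss(mu / lam, scale=lam)
    assert abs(dist.mean() - mu) < 1e-9         # mean is preserved
    assert abs(dist.var() - sigma ** 2) < 1e-9  # variance matches sigma**2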
def load_nanopolish_model(model_file, as_dict=False):
"""Load HMM model from nanopolish model file
the model file has the format:
    the first few lines start with '#' and hold metadata, e.g.:
        #ont_model_name r9.4_180mv_450bps_6mer
        #kit r9.4_450bps
        #strand template
        #k 6
        #original_file r9.4_180mv_450bps_6mer/template_median68pA.model
header line: kmer level_mean level_stdv sd_mean sd_stdv weight
:param model_file: path to model file
:param as_dict: boolean option to return as a dictionary with kmers as keys
"""
assert os.path.exists(model_file), "[load_nanopolish_model] - didn't find model here: {}".format(model_file)
nanopolish_event_model = {}
means = []
SDs = []
noise_means = []
noise_SDs = []
noise_lambdas = []
kmers = []
with open(model_file, 'r') as fH:
for line in fH:
if not line.startswith("#"):
split_line = line.split()
if split_line[1] == "level_mean":
continue
noise_lambda = gaussian_param_to_inv_gaussian_param(float(split_line[3]),
float(split_line[4]))[1]
if as_dict:
kmers.append(split_line[0])
nanopolish_event_model[split_line[0]] = [float(split_line[1]),
float(split_line[2]),
float(split_line[3]),
float(split_line[4]),
noise_lambda]
else:
kmers.append(split_line[0])
means.append(float(split_line[1]))
SDs.append(float(split_line[2]))
noise_means.append(float(split_line[3]))
noise_SDs.append(float(split_line[4]))
noise_lambdas.append(noise_lambda)
if not as_dict:
nanopolish_event_model["means"] = np.asarray(means)
nanopolish_event_model["SDs"] = np.asarray(SDs)
nanopolish_event_model["noise_means"] = np.asarray(noise_means)
nanopolish_event_model["noise_SDs"] = np.asarray(noise_SDs)
nanopolish_event_model["noise_lambdas"] = np.asarray(noise_lambdas)
assert not np.any(nanopolish_event_model["means"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(nanopolish_event_model["SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_means"
assert not np.any(
nanopolish_event_model["noise_means"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_means"
assert not np.any(
nanopolish_event_model["noise_SDs"] == 0.0), "signalHmm.load_model, this model has 0 E_noise_SDs"
k = len(kmers[0])
alphabet = "".join(sorted(list(set("".join(kmers)))))
return nanopolish_event_model, alphabet, k
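# Example usage (the model file name is hypothetical):
#   event_model, alphabet, k = load_nanopolish_model("r9.4_template.model")
#   alphabet, k                   # e.g. ("ACGT", 6)
#   event_model["means"].shape    # one level mean per kmer line in the file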
def convert_nanopolish_model_to_signalalign(nanopolish_model, transition_probs, output_path,
state_number=3, likelihood=0):
"""Convert nanopolish model into signalalign model
:param nanopolish_model: path to nanopolish model
:param transition_probs: transition probabilities for hmm
:param output_path: path to new signalalign model
:param likelihood: likelihood of model (set to zero because we do not have this info from nanopolish)
:param state_number: type of hmm (3 state is default)
"""
nanopolish_event_model, alphabet, kmer_length = load_nanopolish_model(nanopolish_model)
alphabet = "".join(sorted(alphabet.upper()))
for base in alphabet:
assert not is_non_canonical_iupac_base(base), \
"You cannot use IUPAC character to represent multiple bases. {}".format(base)
alphabet_size = len(alphabet)
new_kmers = all_string_permutations(alphabet, length=kmer_length)
with open(output_path, 'w') as f:
# line 0
f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
"".format(stateNumber=state_number, alphabetSize=alphabet_size,
alphabet=alphabet, kmerLength=kmer_length))
# line 1 transitions
for i in range(state_number * state_number):
f.write("{transition}\t".format(transition=str(transition_probs[i])))
# likelihood
f.write("{}\n".format(str(likelihood)))
# line 2 Event Model
for kmer in new_kmers:
k_index = HmmModel._get_kmer_index(kmer, alphabet, kmer_length, alphabet_size)
f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
"".format(level_mean=nanopolish_event_model["means"][k_index],
level_sd=nanopolish_event_model["SDs"][k_index],
noise_mean=nanopolish_event_model["noise_means"][k_index],
noise_sd=nanopolish_event_model["noise_SDs"][k_index],
noise_lambda=nanopolish_event_model["noise_lambdas"][k_index]))
f.write("\n")
return output_path
def convert_and_edit_nanopolish_model_to_signalalign(nanopolish_model, transition_probs, output_path,
find_replace=["M", "E"], state_number=3, likelihood=0):
"""Convert nanopolish model into signalalign model
:param find_replace: find character and replace it with another
:param nanopolish_model: path to nanopolish model
:param transition_probs: transition probabilities for hmm
:param output_path: path to new signalalign model
:param likelihood: likelihood of model (set to zero because we do not have this info from nanopolish)
:param state_number: type of hmm (3 state is default)
"""
nanopolish_event_model, alphabet, kmer_length = load_nanopolish_model(nanopolish_model, as_dict=True)
alphabet = "".join(sorted(alphabet.upper()))
new_alphabet = "".join(sorted(alphabet.replace(find_replace[0], find_replace[1])))
for base in new_alphabet:
assert not is_non_canonical_iupac_base(base), \
"You cannot use IUPAC character to represent multiple bases. {}".format(base)
alphabet_size = len(alphabet)
new_kmers = all_string_permutations(new_alphabet, length=kmer_length)
with open(output_path, 'w') as f:
# line 0
f.write("{stateNumber}\t{alphabetSize}\t{alphabet}\t{kmerLength}\n"
"".format(stateNumber=state_number, alphabetSize=alphabet_size,
alphabet=new_alphabet, kmerLength=kmer_length))
# line 1 transitions
for i in range(state_number * state_number):
f.write("{transition}\t".format(transition=str(transition_probs[i])))
# likelihood
f.write("{}\n".format(str(likelihood)))
# line 2 Event Model
for kmer in new_kmers:
old_kmer = kmer.replace(find_replace[1], find_replace[0])
kmer_data = nanopolish_event_model[old_kmer]
f.write("{level_mean}\t{level_sd}\t{noise_mean}\t{noise_sd}\t{noise_lambda}\t"
"".format(level_mean=float(kmer_data[0]),
level_sd=float(kmer_data[1]),
noise_mean=float(kmer_data[2]),
noise_sd=float(kmer_data[3]),
noise_lambda=float(kmer_data[4])))
f.write("\n")
return output_path
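# Example: converting a nanopolish model while relabeling every "M" as "E"
# (paths are hypothetical; the nine transition probabilities correspond to
# the default 3-state HMM):
#   transitions = [0.9, 0.05, 0.05, 0.1, 0.8, 0.1, 0.1, 0.1, 0.8]
#   convert_and_edit_nanopolish_model_to_signalalign(
#       "nanopolish.model", transitions, "signalalign.model", ["M", "E"])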
# --- file record: 788426a5474cd746d7f96fcf02d96485c121e0bd | 33,818 bytes | Python ---
# Python Project Filter-Detect-GUI/mne/channels/layout.py
# repo: JulienL3vesque/Hexoskin_RnD_OSM @ b524430d6f4b2b300d119b6a1586141e6c2d14a3 | license: MIT
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
import logging
from collections import defaultdict
from itertools import combinations
import os.path as op
import numpy as np
from ..transforms import _pol_to_cart, _cart_to_sph
from ..bem import fit_sphere_to_headshape
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..utils import _clean_names, warn
from ..externals.six.moves import map
class Layout(object):
"""Sensor layouts.
Layouts are typically loaded from a file using read_layout. Only use this
class directly if you're constructing a new layout.
Parameters
----------
box : tuple of length 4
The box dimension (x_min, x_max, y_min, y_max).
pos : array, shape=(n_channels, 4)
The positions of the channels in 2d (x, y, width, height).
names : list
The channel names.
ids : list
The channel ids.
kind : str
The type of Layout (e.g. 'Vectorview-all').
"""
def __init__(self, box, pos, names, ids, kind): # noqa: D102
self.box = box
self.pos = pos
self.names = names
self.ids = ids
self.kind = kind
def save(self, fname):
"""Save Layout to disk.
Parameters
----------
fname : str
The file name (e.g. 'my_layout.lout').
See Also
--------
read_layout
"""
x = self.pos[:, 0]
y = self.pos[:, 1]
width = self.pos[:, 2]
height = self.pos[:, 3]
if fname.endswith('.lout'):
out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
elif fname.endswith('.lay'):
out_str = ''
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
for ii in range(x.shape[0]):
out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
x[ii], y[ii], width[ii], height[ii], self.names[ii]))
f = open(fname, 'w')
f.write(out_str)
f.close()
def __repr__(self):
"""Return the string representation."""
return '<Layout | %s - Channels: %s ...>' % (self.kind,
', '.join(self.names[:3]))
def plot(self, picks=None, show=True):
"""Plot the sensor positions.
Parameters
----------
picks : array-like
Indices of the channels to show. If None (default), all the
channels are shown.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
from ..viz.topomap import plot_layout
return plot_layout(self, picks=picks, show=show)
def _read_lout(fname):
"""Aux function."""
with open(fname) as f:
box_line = f.readline() # first line contains box dimension
box = tuple(map(float, box_line.split()))
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
            pos.append(np.array([x, y, dx, dy], dtype=float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def _read_lay(fname):
"""Aux function."""
with open(fname) as f:
box = None
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
            pos.append(np.array([x, y, dx, dy], dtype=float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def read_layout(kind, path=None, scale=True):
"""Read layout from a file.
Parameters
----------
kind : str
The name of the .lout file (e.g. kind='Vectorview-all' for
'Vectorview-all.lout').
path : str | None
The path of the folder containing the Layout file. Defaults to the
mne/channels/data/layouts folder inside your mne-python installation.
scale : bool
        Apply useful scaling for out-of-the-box plotting using layout.pos.
Defaults to True.
Returns
-------
layout : instance of Layout
The layout.
See Also
--------
Layout.save
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'layouts')
if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
kind += '.lout'
elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
kind += '.lay'
if kind.endswith('.lout'):
fname = op.join(path, kind)
kind = kind[:-5]
box, pos, names, ids = _read_lout(fname)
elif kind.endswith('.lay'):
fname = op.join(path, kind)
kind = kind[:-4]
box, pos, names, ids = _read_lay(fname)
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
if scale:
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
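# Minimal usage sketch ('Vectorview-all' is one of the layout files shipped
# in mne/channels/data/layouts):
#   layout = read_layout('Vectorview-all')
#   layout.names[:3], layout.pos.shape   # channel names and (n, 4) positions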
def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
"""Create .lout file from EEG electrode digitization.
Parameters
----------
info : instance of Info
Measurement info (e.g., raw.info).
radius : float
Viewport radius as a fraction of main figure height. Defaults to 0.5.
width : float | None
Width of sensor axes as a fraction of main figure height. By default,
this will be the maximum width possible without axes overlapping.
height : float | None
Height of sensor axes as a fraction of main figure height. By default,
        this will be the maximum height possible without axes overlapping.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout
The generated Layout.
See Also
--------
make_grid_layout, generate_2d_layout
"""
if not (0 <= radius <= 0.5):
raise ValueError('The radius parameter should be between 0 and 0.5.')
if width is not None and not (0 <= width <= 1.0):
raise ValueError('The width parameter should be between 0 and 1.')
if height is not None and not (0 <= height <= 1.0):
raise ValueError('The height parameter should be between 0 and 1.')
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
loc2d = _auto_topomap_coords(info, picks)
names = [info['chs'][i]['ch_name'] for i in picks]
# Scale [x, y] to [-0.5, 0.5]
loc2d_min = np.min(loc2d, axis=0)
loc2d_max = np.max(loc2d, axis=0)
loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
# If no width or height specified, calculate the maximum value possible
# without axes overlapping.
if width is None or height is None:
width, height = _box_size(loc2d, width, height, padding=0.1)
# Scale to viewport radius
loc2d *= 2 * radius
# Some subplot centers will be at the figure edge. Shrink everything so it
# fits in the figure.
scaling = min(1 / (1. + width), 1 / (1. + height))
loc2d *= scaling
width *= scaling
height *= scaling
# Shift to center
loc2d += 0.5
n_channels = loc2d.shape[0]
pos = np.c_[loc2d[:, 0] - 0.5 * width,
loc2d[:, 1] - 0.5 * height,
width * np.ones(n_channels),
height * np.ones(n_channels)]
box = (0, 1, 0, 1)
ids = 1 + np.arange(n_channels)
layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
return layout
def make_grid_layout(info, picks=None, n_col=None):
"""Generate .lout file for custom data, i.e., ICA sources.
Parameters
----------
info : instance of Info | None
Measurement info (e.g., raw.info). If None, default names will be
employed.
picks : array-like of int | None
        The indices of the channels to be included. If None, all misc channels
will be included.
n_col : int | None
Number of columns to generate. If None, a square grid will be produced.
Returns
-------
layout : Layout
The generated layout.
See Also
--------
make_eeg_layout, generate_2d_layout
"""
if picks is None:
picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
names = [info['chs'][k]['ch_name'] for k in picks]
if not names:
raise ValueError('No misc data channels found.')
ids = list(range(len(picks)))
size = len(picks)
if n_col is None:
# prepare square-like layout
        n_row = n_col = np.sqrt(size)  # try square
        if n_col % 1:
            # try n * (n-1) rectangle
            n_col, n_row = int(n_col + 1), int(n_row)
        else:
            # perfect square: cast to int so np.linspace accepts the counts
            n_col, n_row = int(n_col), int(n_row)
        if n_col * n_row < size:  # jump to the next full square
            n_row += 1
else:
n_row = int(np.ceil(size / float(n_col)))
# setup position grid
x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
np.linspace(-0.5, 0.5, n_row))
x, y = x.ravel()[:size], y.ravel()[:size]
width, height = _box_size(np.c_[x, y], padding=0.1)
# Some axes will be at the figure edge. Shrink everything so it fits in the
# figure. Add 0.01 border around everything
border_x, border_y = (0.01, 0.01)
x_scaling = 1 / (1. + width + border_x)
y_scaling = 1 / (1. + height + border_y)
x = x * x_scaling
y = y * y_scaling
width *= x_scaling
height *= y_scaling
# Shift to center
x += 0.5
y += 0.5
# calculate pos
pos = np.c_[x - 0.5 * width, y - 0.5 * height,
width * np.ones(size), height * np.ones(size)]
box = (0, 1, 0, 1)
layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
return layout
def find_layout(info, ch_type=None, exclude='bads'):
"""Choose a layout based on the channels in the info 'chs' field.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
The channel type for selecting single channel layouts.
Defaults to None. Note, this argument will only be considered for
        VectorView type layout. Use `meg` to force using the full layout
        in situations where the info contains only one sensor type.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout instance | None
None if layout not found.
"""
our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`'])
if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
raise ValueError('Invalid channel type (%s) requested '
'`ch_type` must be %s' % (ch_type, our_types))
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = set([ch['coil_type'] & 0xFFFF for ch in chs])
channel_types = set([ch['kind'] for ch in chs])
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_vv_meg = has_vv_mag and has_vv_grad
has_vv_only_mag = has_vv_mag and not has_vv_grad
has_vv_only_grad = has_vv_grad and not has_vv_mag
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
if ch_type == "meg" and not has_any_meg:
raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
if ch_type == "eeg" and not has_eeg_coils:
raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
if ((has_vv_meg and ch_type is None) or
(any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
layout_name = 'Vectorview-all'
elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
layout_name = 'Vectorview-mag'
elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
if info['ch_names'][0].endswith('X'):
layout_name = 'Vectorview-grad_norm'
else:
layout_name = 'Vectorview-grad'
elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
(has_eeg_coils_and_meg and ch_type == 'eeg')):
if not isinstance(info, (dict, Info)):
raise RuntimeError('Cannot make EEG layout, no measurement info '
'was passed to `find_layout`')
return make_eeg_layout(info, exclude=exclude)
elif has_4D_mag:
layout_name = 'magnesWH3600'
elif has_CTF_grad:
layout_name = 'CTF-275'
elif n_kit_grads > 0:
layout_name = _find_kit_layout(info, n_kit_grads)
else:
xy = _auto_topomap_coords(info, picks=range(info['nchan']),
ignore_overlap=True, to_sphere=False)
return generate_2d_layout(xy, ch_names=info['ch_names'], name='custom',
normalize=False)
layout = read_layout(layout_name)
if not is_old_vv:
layout.names = _clean_names(layout.names, remove_whitespace=True)
if has_CTF_grad:
layout.names = _clean_names(layout.names, before_dash=True)
# Apply mask for excluded channels.
if exclude == 'bads':
exclude = info['bads']
idx = [ii for ii, name in enumerate(layout.names) if name not in exclude]
layout.names = [layout.names[ii] for ii in idx]
layout.pos = layout.pos[idx]
layout.ids = [layout.ids[ii] for ii in idx]
return layout
def _find_kit_layout(info, n_grads):
"""Determine the KIT layout.
Parameters
----------
info : Info
Info object.
n_grads : int
Number of KIT-gradiometers in the info.
Returns
-------
kit_layout : str
One of 'KIT-AD', 'KIT-157' or 'KIT-UMD'.
"""
if info['kit_system_id'] is not None:
# avoid circular import
from ..io.kit.constants import KIT_LAYOUT
if info['kit_system_id'] in KIT_LAYOUT:
kit_layout = KIT_LAYOUT[info['kit_system_id']]
if kit_layout is not None:
return kit_layout
raise NotImplementedError("The layout for the KIT system with ID %i "
"is missing. Please contact the developers "
"about adding it." % info['kit_system_id'])
elif n_grads > 157:
return 'KIT-AD'
# channels which are on the left hemisphere for NY and right for UMD
test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25',
'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30',
'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61',
'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90',
'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112',
'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124',
'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153',
'MEG 154', 'MEG 155', 'MEG 156')
x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs]
if np.all(x):
return 'KIT-157' # KIT-NY
elif np.all(np.invert(x)):
raise NotImplementedError("Guessing sensor layout for legacy UMD "
"files is not implemented. Please convert "
"your files using MNE-Python 0.13 or "
"higher.")
else:
raise RuntimeError("KIT system could not be determined for data")
def _box_size(points, width=None, height=None, padding=0.0):
"""Given a series of points, calculate an appropriate box size.
Parameters
----------
points : array, shape (n_points, 2)
The centers of the axes as a list of (x, y) coordinate pairs. Normally
these are points in the range [0, 1] centered at 0.5.
width : float | None
An optional box width to enforce. When set, only the box height will be
calculated by the function.
height : float | None
An optional box height to enforce. When set, only the box width will be
calculated by the function.
padding : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
Returns
-------
width : float
Width of the box
height : float
Height of the box
"""
from scipy.spatial.distance import pdist
def xdiff(a, b):
return np.abs(a[0] - b[0])
def ydiff(a, b):
return np.abs(a[1] - b[1])
points = np.asarray(points)
all_combinations = list(combinations(points, 2))
if width is None and height is None:
if len(points) <= 1:
# Trivial case first
width = 1.0
height = 1.0
else:
# Find the closest two points A and B.
a, b = all_combinations[np.argmin(pdist(points))]
# The closest points define either the max width or max height.
w, h = xdiff(a, b), ydiff(a, b)
if w > h:
width = w
else:
height = h
# At this point, either width or height is known, or both are known.
if height is None:
# Find all axes that could potentially overlap horizontally.
hdist = pdist(points, xdiff)
candidates = [all_combinations[i] for i, d in enumerate(hdist)
if d < width]
if len(candidates) == 0:
# No axes overlap, take all the height you want.
height = 1.0
else:
            # Find an appropriate height so that none of the found axes will
            # overlap.
height = np.min([ydiff(*c) for c in candidates])
elif width is None:
# Find all axes that could potentially overlap vertically.
vdist = pdist(points, ydiff)
candidates = [all_combinations[i] for i, d in enumerate(vdist)
if d < height]
if len(candidates) == 0:
# No axes overlap, take all the width you want.
width = 1.0
else:
            # Find an appropriate width so that none of the found axes will
            # overlap.
width = np.min([xdiff(*c) for c in candidates])
# Add a bit of padding between boxes
width *= 1 - padding
height *= 1 - padding
return width, height
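# Worked example for _box_size: for four points on the corners of a centered
# square, the closest pair fixes the width, the height follows from the
# horizontally overlapping pairs, and padding=0.1 then shrinks both by 10%:
#   pts = np.array([[.25, .25], [.75, .25], [.25, .75], [.75, .75]])
#   _box_size(pts, padding=0.1)   # -> (0.45, 0.45)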
def _find_topomap_coords(info, picks, layout=None):
"""Guess the E/MEG layout and return appropriate topomap coordinates.
Parameters
----------
info : instance of Info
Measurement info.
picks : list of int
Channel indices to generate topomap coords for.
layout : None | instance of Layout
        Enforce using a specific layout. With None, a layout is chosen
        based on the channels in the info.
Returns
-------
coords : array, shape = (n_chs, 2)
2 dimensional coordinates for each sensor for a topomap plot.
"""
if len(picks) == 0:
raise ValueError("Need more than 0 channels.")
if layout is not None:
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(info, picks)
return pos
def _auto_topomap_coords(info, picks, ignore_overlap=False, to_sphere=True):
"""Make a 2 dimensional sensor map from sensor positions in an info dict.
The default is to use the electrode locations. The fallback option is to
attempt using digitization points of kind FIFFV_POINT_EEG. This only works
with EEG and requires an equal number of digitization points and sensors.
Parameters
----------
info : instance of Info
The measurement info.
picks : list of int
The channel indices to generate topomap coords for.
ignore_overlap : bool
Whether to ignore overlapping positions in the layout. If False and
positions overlap, an error is thrown.
to_sphere : bool
If True, the radial distance of spherical coordinates is ignored, in
effect fitting the xyz-coordinates to a sphere. Defaults to True.
Returns
-------
locs : array, shape = (n_sensors, 2)
An array of positions of the 2 dimensional map.
"""
from scipy.spatial.distance import pdist, squareform
chs = [info['chs'][i] for i in picks]
# Use channel locations if available
locs3d = np.array([ch['loc'][:3] for ch in chs])
    # If electrode locations are not available, use digitization points
if len(locs3d) == 0 or np.allclose(locs3d, 0):
        logging.warning('Did not find any electrode locations in the info, '
'will attempt to use digitization points instead. '
'However, if digitization points do not correspond to '
'the EEG electrodes, this will lead to bad results. '
'Please verify that the sensor locations in the plot '
'are accurate.')
# MEG/EOG/ECG sensors don't have digitization points; all requested
# channels must be EEG
for ch in chs:
if ch['kind'] != FIFF.FIFFV_EEG_CH:
raise ValueError("Cannot determine location of MEG/EOG/ECG "
"channels using digitization points.")
eeg_ch_names = [ch['ch_name'] for ch in info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH]
# Get EEG digitization points
if info['dig'] is None or len(info['dig']) == 0:
raise RuntimeError('No digitization points found.')
locs3d = np.array([point['r'] for point in info['dig']
if point['kind'] == FIFF.FIFFV_POINT_EEG])
if len(locs3d) == 0:
raise RuntimeError('Did not find any digitization points of '
'kind FIFFV_POINT_EEG (%d) in the info.'
% FIFF.FIFFV_POINT_EEG)
if len(locs3d) != len(eeg_ch_names):
raise ValueError("Number of EEG digitization points (%d) "
"doesn't match the number of EEG channels "
"(%d)" % (len(locs3d), len(eeg_ch_names)))
# Center digitization points on head origin
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')
locs3d -= origin_head
# Match the digitization points with the requested
# channels.
eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
# Duplicate points cause all kinds of trouble during visualization
dist = pdist(locs3d)
if np.min(dist) < 1e-10 and not ignore_overlap:
problematic_electrodes = [
chs[elec_i]['ch_name']
for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0]
]
raise ValueError('The following electrodes have overlapping positions:'
'\n ' + str(problematic_electrodes) + '\nThis '
'causes problems during visualization.')
if to_sphere:
# use spherical (theta, pol) as (r, theta) for polar->cartesian
return _pol_to_cart(_cart_to_sph(locs3d)[:, 1:][:, ::-1])
return _pol_to_cart(_cart_to_sph(locs3d))
def _topo_to_sphere(pos, eegs):
"""Transform xy-coordinates to sphere.
Parameters
----------
pos : array-like, shape (n_channels, 2)
        xy-coordinates to transform.
eegs : list of int
Indices of eeg channels that are included when calculating the sphere.
Returns
-------
coords : array, shape (n_channels, 3)
xyz-coordinates.
"""
xs, ys = np.array(pos).T
sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2)))
xs /= sqs # Shape to a sphere and normalize
ys /= sqs
xs += 0.5 - np.mean(xs[eegs]) # Center the points
ys += 0.5 - np.mean(ys[eegs])
xs = xs * 2. - 1. # Values ranging from -1 to 1
ys = ys * 2. - 1.
rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.)
alphas = np.arccos(rs)
zs = np.sin(alphas)
return np.column_stack([xs, ys, zs])
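# Geometry note for _topo_to_sphere: a point at radius r on the unit disk
# gets elevation arccos(r), so z = sin(arccos(r)) = sqrt(1 - r ** 2); the
# rim (r = 1) maps to z = 0 and the center (r = 0) to the pole at z = 1.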
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads',
raise_error=True):
"""Find the picks for pairing grad channels.
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
raise_error : bool
Whether to raise an error when no pairs are found. If False, raises a
warning.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info['chs'][i]
name = ch['ch_name']
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
if raise_error:
raise ValueError("No 'grad' channel pairs found.")
else:
warn("No 'grad' channel pairs found.")
return list()
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info['ch_names']
picks = [ch_names.index(c['ch_name']) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = (_find_topomap_coords(info, picks, layout)
.reshape(shape).mean(axis=1))
return picks, coords
else:
return picks
# This function is used to pair grads when no info is available; this is the
# case for Projections, which do not carry the measurement info.
def _pair_grad_sensors_from_ch_names(ch_names):
    """Find the indices for pairing grad channels.
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
    indices : list of int
        Indices of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
def _merge_grad_data(data, method='rms'):
"""Merge data from channel pairs using the RMS or mean.
Parameters
----------
data : array, shape = (n_channels, n_times)
Data for channels, ordered in pairs.
method : str
Can be 'rms' or 'mean'.
Returns
-------
data : array, shape = (n_channels / 2, n_times)
The root mean square or mean for each pair.
"""
data = data.reshape((len(data) // 2, 2, -1))
if method == 'mean':
data = np.mean(data, axis=1)
elif method == 'rms':
data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
else:
        raise ValueError('method must be "rms" or "mean", got %s.' % method)
return data
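# Example: merging one gradiometer pair with the default RMS rule
# (two channels, two time points):
#   d = np.array([[3., 3.], [4., 4.]])
#   _merge_grad_data(d)   # -> [[3.5355..., 3.5355...]], i.e. sqrt((9 + 16) / 2)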
def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
ch_indices=None, name='ecog', bg_image=None,
normalize=True):
"""Generate a custom 2D layout from xy points.
Generates a 2-D layout for plotting with plot_topo methods and
functions. XY points will be normalized between 0 and 1, where
normalization extremes will be either the min/max of xy, or
the width/height of bg_image.
Parameters
----------
xy : ndarray (N x 2)
The xy coordinates of sensor locations.
w : float
The width of each sensor's axis (between 0 and 1)
h : float
The height of each sensor's axis (between 0 and 1)
pad : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
ch_names : list
The names of each channel. Must be a list of strings, with one
string per channel.
ch_indices : list
Index of each channel - must be a collection of unique integers,
one index per channel.
name : string
The name of this layout type.
bg_image : str | ndarray
The image over which sensor axes will be plotted. Either a path to an
image file, or an array that can be plotted with plt.imshow. If
provided, xy points will be normalized by the width/height of this
image. If not, xy points will be normalized by their own min/max.
normalize : bool
Whether to normalize the coordinates to run from 0 to 1. Defaults to
True.
Returns
-------
layout : Layout
A Layout object that can be plotted with plot_topo
functions and methods.
See Also
--------
make_eeg_layout, make_grid_layout
Notes
-----
.. versionadded:: 0.9.0
"""
    from matplotlib.pyplot import imread  # scipy.ndimage.imread was removed in SciPy 1.2
if ch_indices is None:
ch_indices = np.arange(xy.shape[0])
if ch_names is None:
ch_names = ['{0}'.format(i) for i in ch_indices]
if len(ch_names) != len(ch_indices):
raise ValueError('# ch names and indices must be equal')
if len(ch_names) != len(xy):
raise ValueError('# ch names and xy vals must be equal')
x, y = xy.copy().astype(float).T
# Normalize xy to 0-1
if bg_image is not None:
# Normalize by image dimensions
if isinstance(bg_image, str):
img = imread(bg_image)
else:
img = bg_image
x /= img.shape[1]
y /= img.shape[0]
elif normalize:
# Normalize x and y by their maxes
for i_dim in [x, y]:
i_dim -= i_dim.min(0)
i_dim /= (i_dim.max(0) - i_dim.min(0))
# Create box and pos variable
box = _box_size(np.vstack([x, y]).T, padding=pad)
box = (0, 0, box[0], box[1])
w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
loc_params = np.vstack([x, y, w, h]).T
layout = Layout(box, loc_params, ch_names, ch_indices, name)
return layout
# --- file record: 22dd641ab6240b2e0b544b76878453bf59a0ebb0 | 40,749 bytes | Python ---
# raiden/tests/integration/long_running/test_settlement.py
# repo: sangaman/raiden @ d67d780909148f34528e129fd777dbfcdf0322dc | license: MIT
import random
from hashlib import sha256
import gevent
import pytest
from eth_utils import to_checksum_address
from gevent.timeout import Timeout
from raiden import waiting
from raiden.api.python import RaidenAPI
from raiden.app import App
from raiden.constants import EMPTY_SIGNATURE, UINT64_MAX
from raiden.exceptions import RaidenUnrecoverableError
from raiden.messages.transfers import LockedTransfer, LockExpired, RevealSecret, Unlock
from raiden.messages.withdraw import WithdrawExpired
from raiden.storage.restore import channel_state_until_state_change
from raiden.storage.sqlite import HIGH_STATECHANGE_ULID, RANGE_ALL_STATE_CHANGES
from raiden.tests.utils import factories
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import raiden_state_changes_search_for_item, search_for_item
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.protocol import WaitForMessage
from raiden.tests.utils.transfer import assert_synced_channel_state, get_channelstate, transfer
from raiden.transfer import channel, views
from raiden.transfer.events import SendWithdrawConfirmation
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.state_change import (
ContractReceiveChannelBatchUnlock,
ContractReceiveChannelClosed,
ContractReceiveChannelSettled,
)
from raiden.utils import sha3
from raiden.utils.secrethash import sha256_secrethash
from raiden.utils.timeout import BlockTimeout
from raiden.utils.typing import BlockNumber, MessageID, PaymentAmount, PaymentID, Secret
def wait_for_batch_unlock(app, token_network_address, receiver, sender):
unlock_event = None
while not unlock_event:
gevent.sleep(1)
state_changes = app.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
unlock_event = search_for_item(
state_changes,
ContractReceiveChannelBatchUnlock,
{
"token_network_address": token_network_address,
"receiver": receiver,
"sender": sender,
},
)
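# Note: wait_for_batch_unlock polls the node's write-ahead log once per
# second and only returns once the unlock state change appears, so callers
# should wrap it in a timeout (see the gevent.Timeout guard further below).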
def is_channel_registered(
node_app: App, partner_app: App, canonical_identifier: CanonicalIdentifier
) -> bool:
"""True if the `node_app` has a channel with `partner_app` in its state."""
token_network = views.get_token_network_by_address(
chain_state=views.state_from_app(node_app),
token_network_address=canonical_identifier.token_network_address,
)
assert token_network
is_in_channelid_map = (
canonical_identifier.channel_identifier in token_network.channelidentifiers_to_channels
)
is_in_partner_map = (
canonical_identifier.channel_identifier
in token_network.partneraddresses_to_channelidentifiers[partner_app.raiden.address]
)
return is_in_channelid_map and is_in_partner_map
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_settle_is_automatically_called(raiden_network, token_addresses):
"""Settle is automatically called by one of the nodes."""
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
assert token_network_address
token_network = views.get_token_network_by_address(
views.state_from_app(app0), token_network_address
)
assert token_network
channel_identifier = get_channelstate(app0, app1, token_network_address).identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[app1.raiden.address]
)
# A ChannelClose event will be generated, this will be polled by both apps
# and each must start a task for calling settle
RaidenAPI(app1.raiden).channel_close(registry_address, token_address, app0.raiden.address)
waiting.wait_for_close(
app0.raiden,
registry_address,
token_address,
[channel_identifier],
app0.raiden.alarm.sleep_time,
)
channel_state = views.get_channelstate_for(
views.state_from_raiden(app0.raiden), registry_address, token_address, app1.raiden.address
)
assert channel_state
assert channel_state.close_transaction
assert channel_state.close_transaction.finished_block_number
waiting.wait_for_settle(
app0.raiden,
registry_address,
token_address,
[channel_identifier],
app0.raiden.alarm.sleep_time,
)
token_network = views.get_token_network_by_address(
views.state_from_app(app0), token_network_address
)
assert token_network
assert (
channel_identifier
not in token_network.partneraddresses_to_channelidentifiers[app1.raiden.address]
)
state_changes = app0.raiden.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
assert search_for_item(
state_changes,
ContractReceiveChannelClosed,
{
"token_network_address": token_network_address,
"channel_identifier": channel_identifier,
"transaction_from": app1.raiden.address,
"block_number": channel_state.close_transaction.finished_block_number,
},
)
assert search_for_item(
state_changes,
ContractReceiveChannelSettled,
{"token_network_address": token_network_address, "channel_identifier": channel_identifier},
)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_lock_expiry(raiden_network, token_addresses, deposit):
"""Test lock expiry and removal."""
alice_app, bob_app = raiden_network
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
)
assert token_network_address
hold_event_handler = bob_app.raiden.raiden_event_handler
wait_message_handler = bob_app.raiden.message_handler
token_network = views.get_token_network_by_address(
views.state_from_app(alice_app), token_network_address
)
assert token_network
channel_state = get_channelstate(alice_app, bob_app, token_network_address)
channel_identifier = channel_state.identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
)
alice_to_bob_amount = 10
identifier = 1
target = bob_app.raiden.address
transfer_1_secret = factories.make_secret(0)
transfer_1_secrethash = sha256_secrethash(transfer_1_secret)
transfer_2_secret = factories.make_secret(1)
transfer_2_secrethash = sha256_secrethash(transfer_2_secret)
hold_event_handler.hold_secretrequest_for(secrethash=transfer_1_secrethash)
transfer1_received = wait_message_handler.wait_for_message(
LockedTransfer, {"lock": {"secrethash": transfer_1_secrethash}}
)
transfer2_received = wait_message_handler.wait_for_message(
LockedTransfer, {"lock": {"secrethash": transfer_2_secrethash}}
)
remove_expired_lock_received = wait_message_handler.wait_for_message(
LockExpired, {"secrethash": transfer_1_secrethash}
)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=target,
identifier=identifier,
secret=transfer_1_secret,
)
transfer1_received.wait()
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
lock = channel.get_lock(alice_bob_channel_state.our_state, transfer_1_secrethash)
assert lock
# This is the current state of the protocol:
#
# A -> B LockedTransfer
# B -> A SecretRequest
# - protocol didn't continue
assert_synced_channel_state(
token_network_address, alice_app, deposit, [lock], bob_app, deposit, []
)
# Verify lock is registered in both channel states
alice_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
assert transfer_1_secrethash in alice_channel_state.our_state.secrethashes_to_lockedlocks
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
assert transfer_1_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_1_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task
remove_expired_lock_received.wait()
alice_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
assert transfer_1_secrethash not in alice_channel_state.our_state.secrethashes_to_lockedlocks
# Verify Bob received the message and processed the LockExpired message
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
assert transfer_1_secrethash not in bob_channel_state.partner_state.secrethashes_to_lockedlocks
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_1_secrethash not in alice_chain_state.payment_mapping.secrethashes_to_task
# Make another transfer
alice_to_bob_amount = 10
identifier = 2
hold_event_handler.hold_secretrequest_for(secrethash=transfer_2_secrethash)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=target,
identifier=identifier,
secret=transfer_2_secret,
)
transfer2_received.wait()
# Make sure the other transfer still exists
alice_chain_state = views.state_from_raiden(alice_app.raiden)
assert transfer_2_secrethash in alice_chain_state.payment_mapping.secrethashes_to_task
bob_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
assert transfer_2_secrethash in bob_channel_state.partner_state.secrethashes_to_lockedlocks
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_batch_unlock(raiden_network, token_addresses, secret_registry_address, deposit):
"""Tests that batch unlock is properly called.
This test will start a single incomplete transfer, the secret will be
revealed *on-chain*. The node that receives the tokens has to call unlock,
the node that doesn't gain anything does nothing.
"""
alice_app, bob_app = raiden_network
alice_address = alice_app.raiden.address
bob_address = bob_app.raiden.address
token_network_registry_address = alice_app.raiden.default_registry.address
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(alice_app), token_network_registry_address, token_address
)
assert token_network_address
hold_event_handler = bob_app.raiden.raiden_event_handler
# Take a snapshot early on
alice_app.raiden.wal.snapshot()
canonical_identifier = get_channelstate(
alice_app, bob_app, token_network_address
).canonical_identifier
assert is_channel_registered(alice_app, bob_app, canonical_identifier)
assert is_channel_registered(bob_app, alice_app, canonical_identifier)
token_proxy = alice_app.raiden.proxy_manager.token(token_address)
alice_initial_balance = token_proxy.balance_of(alice_app.raiden.address)
bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)
# Take snapshot before transfer
alice_app.raiden.wal.snapshot()
alice_to_bob_amount = 10
identifier = 1
secret = Secret(sha3(bob_address))
secrethash = sha256_secrethash(secret)
secret_request_event = hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=bob_address,
identifier=identifier,
secret=secret,
)
secret_request_event.get() # wait for the messages to be exchanged
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
lock = channel.get_lock(alice_bob_channel_state.our_state, secrethash)
assert lock
# This is the current state of the protocol:
#
# A -> B LockedTransfer
# B -> A SecretRequest
# - protocol didn't continue
assert_synced_channel_state(
token_network_address, alice_app, deposit, [lock], bob_app, deposit, []
)
# Test WAL restore to return the latest channel state
alice_app.raiden.wal.snapshot()
our_balance_proof = alice_bob_channel_state.our_state.balance_proof
restored_channel_state = channel_state_until_state_change(
raiden=alice_app.raiden,
canonical_identifier=alice_bob_channel_state.canonical_identifier,
state_change_identifier=HIGH_STATECHANGE_ULID,
)
assert restored_channel_state
our_restored_balance_proof = restored_channel_state.our_state.balance_proof
assert our_balance_proof == our_restored_balance_proof
# Close the channel before revealing the secret off-chain. This will leave
# a pending lock in the channel which has to be unlocked on-chain.
#
    # The token network will emit a ChannelClose event; this will be polled
    # by both apps, and each must start a task for calling settle.
RaidenAPI(bob_app.raiden).channel_close(
token_network_registry_address, token_address, alice_app.raiden.address
)
# The secret has to be registered manually because Bob never learned the
# secret. The test is holding the SecretRequest to ensure the off-chain
# unlock will not happen and the channel is closed with a pending lock.
#
# Alternatives would be to hold the unlock messages, or to stop and restart
# the apps after the channel is closed.
secret_registry_proxy = alice_app.raiden.proxy_manager.secret_registry(secret_registry_address)
secret_registry_proxy.register_secret(secret=secret)
    msg = (
        "The lock must still be part of the node state for the test to proceed, "
        "otherwise there is no unlock to be done."
    )
assert lock, msg
msg = (
"The secret must be registered before the lock expires, in order for "
"the unlock to happen on-chain. Otherwise the test will fail on the "
"expected balances."
)
assert lock.expiration > alice_app.raiden.get_block_number(), msg
assert lock.secrethash == sha256(secret).digest()
waiting.wait_for_settle(
alice_app.raiden,
token_network_registry_address,
token_address,
[alice_bob_channel_state.identifier],
alice_app.raiden.alarm.sleep_time,
)
msg = "The channel_state must not have been cleared, one of the ends has pending locks to do."
assert is_channel_registered(alice_app, bob_app, canonical_identifier), msg
assert is_channel_registered(bob_app, alice_app, canonical_identifier), msg
    msg = (
        "Timeout while waiting for the unlock to be mined. This may happen "
        "if the transaction is rejected or not mined, or if the node's "
        "alarm task is not running."
    )
with gevent.Timeout(seconds=30, exception=AssertionError(msg)):
# Wait for both nodes (Bob and Alice) to see the on-chain unlock
wait_for_batch_unlock(
app=alice_app,
token_network_address=token_network_address,
receiver=bob_address,
sender=alice_address,
)
wait_for_batch_unlock(
app=bob_app,
token_network_address=token_network_address,
receiver=bob_address,
sender=alice_address,
)
    msg = (
        "The nodes have done the unlock, and both ends have seen it, so "
        "the channel must now be cleared"
    )
assert not is_channel_registered(alice_app, bob_app, canonical_identifier), msg
assert not is_channel_registered(bob_app, alice_app, canonical_identifier), msg
alice_new_balance = alice_initial_balance + deposit - alice_to_bob_amount
bob_new_balance = bob_initial_balance + deposit + alice_to_bob_amount
msg = "Unexpected end balance after channel settlement with batch unlock."
assert token_proxy.balance_of(alice_app.raiden.address) == alice_new_balance, msg
assert token_proxy.balance_of(bob_app.raiden.address) == bob_new_balance, msg
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_channel_withdraw(
raiden_network, number_of_nodes, token_addresses, deposit, network_wait, retry_timeout
):
""" Withdraw funds after a mediated transfer."""
alice_app, bob_app = raiden_network
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
)
assert token_network_address
token_proxy = bob_app.raiden.proxy_manager.token(token_address)
bob_initial_balance = token_proxy.balance_of(bob_app.raiden.address)
message_handler = WaitForMessage()
bob_app.raiden.message_handler = message_handler
alice_to_bob_amount = 10
identifier = 1
target = bob_app.raiden.address
secret = sha3(target)
payment_status = alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=target,
identifier=identifier,
secret=secret,
)
wait_for_unlock = bob_app.raiden.message_handler.wait_for_message(
Unlock, {"payment_identifier": identifier}
)
timeout = network_wait * number_of_nodes
with Timeout(seconds=timeout):
wait_for_unlock.get()
msg = (
f"transfer from {to_checksum_address(alice_app.raiden.address)} "
f"to {to_checksum_address(bob_app.raiden.address)} failed."
)
assert payment_status.payment_done.get(), msg
total_withdraw = deposit + alice_to_bob_amount
bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
bob_app.raiden.withdraw(
canonical_identifier=bob_alice_channel_state.canonical_identifier,
total_withdraw=total_withdraw,
)
waiting.wait_for_withdraw_complete(
raiden=bob_app.raiden,
canonical_identifier=bob_alice_channel_state.canonical_identifier,
total_withdraw=total_withdraw,
retry_timeout=retry_timeout,
)
bob_balance_after_withdraw = token_proxy.balance_of(bob_app.raiden.address)
assert bob_initial_balance + total_withdraw == bob_balance_after_withdraw
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_channel_withdraw_expired(
raiden_network, number_of_nodes, token_addresses, deposit, network_wait, retry_timeout
):
""" Tests withdraw expiration. """
alice_app, bob_app = raiden_network
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(alice_app), alice_app.raiden.default_registry.address, token_address
)
assert token_network_address
# Prevent withdraw confirmation from being sent
send_withdraw_confirmation_event = alice_app.raiden.raiden_event_handler.hold(
SendWithdrawConfirmation, {}
)
alice_to_bob_amount = 10
total_withdraw = deposit + alice_to_bob_amount
wait_for_withdraw_expired_message = alice_app.raiden.message_handler.wait_for_message(
WithdrawExpired, {"total_withdraw": total_withdraw}
)
identifier = 1
target = bob_app.raiden.address
secret = sha3(target)
payment_status = alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=target,
identifier=identifier,
secret=secret,
)
wait_for_unlock = bob_app.raiden.message_handler.wait_for_message(
Unlock, {"payment_identifier": identifier}
)
timeout = network_wait * number_of_nodes
with Timeout(seconds=timeout):
wait_for_unlock.get()
msg = (
f"transfer from {to_checksum_address(alice_app.raiden.address)} "
f"to {to_checksum_address(bob_app.raiden.address)} failed."
)
assert payment_status.payment_done.get(), msg
bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
bob_app.raiden.withdraw(
canonical_identifier=bob_alice_channel_state.canonical_identifier,
total_withdraw=total_withdraw,
)
with Timeout(seconds=timeout):
send_withdraw_confirmation_event.wait()
# Make sure proper withdraw state is set in both channel states
bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
assert bob_alice_channel_state.our_total_withdraw == total_withdraw
assert bob_alice_channel_state.our_state.withdraws_pending.get(total_withdraw) is not None
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
assert alice_bob_channel_state.partner_total_withdraw == total_withdraw
assert alice_bob_channel_state.partner_state.withdraws_pending.get(total_withdraw) is not None
withdraw_expiration = bob_alice_channel_state.our_state.withdraws_pending[
total_withdraw
].expiration
expiration_threshold = channel.get_sender_expiration_threshold(withdraw_expiration)
waiting.wait_for_block(
raiden=bob_app.raiden,
block_number=BlockNumber(expiration_threshold + 1),
retry_timeout=retry_timeout,
)
bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
assert bob_alice_channel_state.our_total_withdraw == 0
assert bob_alice_channel_state.our_state.withdraws_pending.get(total_withdraw) is None
with Timeout(seconds=timeout):
wait_for_withdraw_expired_message.wait()
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
assert alice_bob_channel_state.partner_total_withdraw == 0
assert alice_bob_channel_state.partner_state.withdraws_pending.get(total_withdraw) is None
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
def test_settled_lock(token_addresses, raiden_network, deposit):
""" Any transfer following a secret reveal must update the locksroot, so
that an attacker cannot reuse a secret to double claim a lock.
"""
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
amount = PaymentAmount(30)
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
assert token_network_address
hold_event_handler = app1.raiden.raiden_event_handler
address0 = app0.raiden.address
address1 = app1.raiden.address
deposit0 = deposit
deposit1 = deposit
token_proxy = app0.raiden.proxy_manager.token(token_address)
initial_balance0 = token_proxy.balance_of(address0)
initial_balance1 = token_proxy.balance_of(address1)
identifier = 1
target = app1.raiden.address
secret = sha3(target)
secrethash = sha256(secret).digest()
secret_available = hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
app0.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
)
secret_available.wait() # wait for the messages to be exchanged
# Save the pending locks from the pending transfer, used to test the unlock
channelstate_0_1 = get_channelstate(app0, app1, token_network_address)
batch_unlock = channel.get_batch_unlock(channelstate_0_1.our_state)
assert batch_unlock
hold_event_handler.release_secretrequest_for(app1.raiden, secrethash)
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount,
identifier=PaymentID(2),
)
RaidenAPI(app1.raiden).channel_close(registry_address, token_address, app0.raiden.address)
waiting.wait_for_settle(
app1.raiden,
app1.raiden.default_registry.address,
token_address,
[channelstate_0_1.identifier],
app1.raiden.alarm.sleep_time,
)
current_block = app0.raiden.rpc_client.block_number()
netting_channel = app1.raiden.proxy_manager.payment_channel(
canonical_identifier=channelstate_0_1.canonical_identifier
)
    # The transfer locksroot must not contain the unlocked lock, so the
    # unlock must fail.
with pytest.raises(RaidenUnrecoverableError):
netting_channel.unlock(
sender=channelstate_0_1.our_state.address,
receiver=channelstate_0_1.partner_state.address,
pending_locks=batch_unlock,
given_block_identifier=current_block,
)
expected_balance0 = initial_balance0 + deposit0 - amount * 2
expected_balance1 = initial_balance1 + deposit1 + amount * 2
assert token_proxy.balance_of(address0) == expected_balance0
assert token_proxy.balance_of(address1) == expected_balance1
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [1])
def test_automatic_secret_registration(raiden_chain, token_addresses):
app0, app1 = raiden_chain
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
hold_event_handler = app1.raiden.raiden_event_handler
amount = 100
identifier = 1
message_handler = WaitForMessage()
app1.raiden.message_handler = message_handler
target = app1.raiden.address
secret = Secret(sha3(target))
secrethash = sha256_secrethash(secret)
hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
locked_transfer_received = message_handler.wait_for_message(LockedTransfer, {})
app0.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
)
# Wait for app1 to receive the locked transfer.
locked_transfer_received.wait()
# Stop app0 to avoid sending the unlock, this must be done after the locked
# transfer is sent.
app0.raiden.transport.stop()
reveal_secret = RevealSecret(
message_identifier=MessageID(random.randint(0, UINT64_MAX)),
secret=secret,
signature=EMPTY_SIGNATURE,
)
app0.raiden.sign(reveal_secret)
message_handler.on_message(app1.raiden, reveal_secret)
chain_state = views.state_from_app(app1)
secrethash = sha256_secrethash(secret)
target_task = chain_state.payment_mapping.secrethashes_to_task[secrethash]
lock_expiration = target_task.target_state.transfer.lock.expiration # type: ignore
app1.raiden.proxy_manager.wait_until_block(target_block_number=lock_expiration)
assert app1.raiden.default_secret_registry.is_secret_registered(
secrethash=secrethash, block_identifier="latest"
)
@raise_on_failure
@pytest.mark.xfail(reason="test incomplete")
@pytest.mark.parametrize("number_of_nodes", [3])
def test_start_end_attack(token_addresses, raiden_chain, deposit):
""" An attacker can try to steal tokens from a hub or the last node in a
path.
    The attacker needs to use two addresses (A1 and A2) and connect both to
    the hub H. Once connected, a mediated transfer is initialized from A1 to
    A2 through H. Once the node A2 receives the mediated transfer, the
    attacker uses the known secret to close and settle the channel H-A2,
    without revealing the secret to H's raiden node.
    The intention is to make the hub transfer the token to A2 while being
    unable to recover the token from A1."""
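    # Topology sketch (the attacker controls both A1 and A2):
    #
    #   A1 ---channel--- H ---channel--- A2
    #
    # A1 starts the mediated transfer A1 -> H -> A2; A2 then closes and
    # settles H-A2 using the secret it already knows, without ever
    # revealing it to H off-chain.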
amount = 30
token = token_addresses[0]
app0, app1, app2 = raiden_chain # pylint: disable=unbalanced-tuple-unpacking
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token
)
assert token_network_address
hold_event_handler = app2.raiden.raiden_event_handler
# the attacker owns app0 and app2 and creates a transfer through app1
identifier = 1
target = app2.raiden.address
secret = sha3(target)
secrethash = sha256(secret).digest()
hold_event_handler.hold_secretrequest_for(secrethash=secrethash)
app0.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=amount,
fee=0,
target=target,
identifier=identifier,
secret=secret,
)
attack_channel = get_channelstate(app2, app1, token_network_address)
attack_transfer = None # TODO
attack_contract = attack_channel.external_state.netting_channel.address # type: ignore
hub_contract = get_channelstate( # type: ignore
app1, app0, token_network_address
).external_state.netting_channel.address
# start the settle counter
attack_balance_proof = attack_transfer.to_balanceproof() # type: ignore
attack_channel.netting_channel.channel_close(attack_balance_proof) # type: ignore
# wait until the last block to reveal the secret, hopefully we are not
# missing a block during the test
assert attack_transfer
app2.raiden.proxy_manager.wait_until_block(
target_block_number=attack_transfer.lock.expiration - 1
)
# since the attacker knows the secret he can net the lock
# <the commented code below is left for documentation purposes>
# attack_channel.netting_channel.unlock(
# UnlockProofState(unlock_proof, attack_transfer.lock, secret)
# )
# XXX: verify that the secret was publicized
# at this point the hub might not know the secret yet, and won't be able to
# claim the token from the channel A1 - H
# the attacker settles the contract
app2.raiden.proxy_manager.next_block()
attack_channel.netting_channel.settle(token, attack_contract)
# at this point the attacker has the "stolen" funds
attack_contract = app2.raiden.proxy_manager.token_hashchannel[token][attack_contract]
assert attack_contract.participants[app2.raiden.address]["netted"] == deposit + amount
assert attack_contract.participants[app1.raiden.address]["netted"] == deposit - amount
# and the hub's channel A1-H doesn't
hub_contract = app1.raiden.proxy_manager.token_hashchannel[token][hub_contract]
assert hub_contract.participants[app0.raiden.address]["netted"] == deposit
assert hub_contract.participants[app1.raiden.address]["netted"] == deposit
    # to mitigate the attack the Hub _needs_ to use a lower expiration for
    # the locked transfer between H-A2 than for A1-H. For A2 to acquire the
    # token it needs to make the secret public in the blockchain, so it
    # publishes the secret through an event, and the Hub is then able to
    # recover its funds
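    # Worked example of the mitigation (block numbers are illustrative):
    # with expiration(H-A2) = N and expiration(A1-H) = N + reveal_timeout,
    # A2 must register the secret on-chain before block N to claim its
    # lock; that registration is public, so H still has until block
    # N + reveal_timeout to claim the matching lock on A1-H.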
app1.raiden.proxy_manager.next_block()
# XXX: verify that the Hub has found the secret, close and settle the channel
# the hub has acquired its token
hub_contract = app1.raiden.proxy_manager.token_hashchannel[token][hub_contract]
assert hub_contract.participants[app0.raiden.address]["netted"] == deposit + amount
assert hub_contract.participants[app1.raiden.address]["netted"] == deposit - amount
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_automatic_dispute(raiden_network, deposit, token_addresses):
app0, app1 = raiden_network
registry_address = app0.raiden.default_registry.address
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_app(app0), app0.raiden.default_registry.address, token_address
)
assert token_network_address
channel0 = get_channelstate(app0, app1, token_network_address)
token_proxy = app0.raiden.proxy_manager.token(channel0.token_address)
initial_balance0 = token_proxy.balance_of(app0.raiden.address)
initial_balance1 = token_proxy.balance_of(app1.raiden.address)
amount0_1 = PaymentAmount(10)
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount0_1,
identifier=PaymentID(1),
)
amount1_1 = PaymentAmount(50)
transfer(
initiator_app=app1,
target_app=app0,
token_address=token_address,
amount=amount1_1,
identifier=PaymentID(2),
)
amount0_2 = PaymentAmount(60)
transfer(
initiator_app=app0,
target_app=app1,
token_address=token_address,
amount=amount0_2,
identifier=PaymentID(3),
)
    # Alice can only provide one of Bob's transfers, so she is incentivized
    # to use the one with the largest transferred_amount.
RaidenAPI(app0.raiden).channel_close(registry_address, token_address, app1.raiden.address)
    # Bob needs to provide a transfer, otherwise his netted balance will be
    # wrong, so he is incentivized to use Alice's transfer with the largest
    # transferred_amount.
#
# This is done automatically
# channel1.external_state.update_transfer(
# alice_second_transfer,
# )
waiting.wait_for_settle(
app0.raiden,
registry_address,
token_address,
[channel0.identifier],
app0.raiden.alarm.sleep_time,
)
# check that the channel is properly settled and that Bob's client
# automatically called updateTransfer() to reflect the actual transactions
assert token_proxy.balance_of(token_network_address) == 0
total0 = amount0_1 + amount0_2
total1 = amount1_1
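    # i.e. total0 = 10 + 60 = 70 and total1 = 50, a net flow of 20 tokens
    # from app0 to app1 once the channel settles.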
expected_balance0 = initial_balance0 + deposit - total0 + total1
expected_balance1 = initial_balance1 + deposit + total0 - total1
assert token_proxy.balance_of(app0.raiden.address) == expected_balance0
assert token_proxy.balance_of(app1.raiden.address) == expected_balance1
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
def test_batch_unlock_after_restart(raiden_network, token_addresses, deposit):
"""Simulate the case where:
- A sends B a transfer
- B sends A a transfer
- Secrets were never revealed
- B closes channel
- A crashes
- Wait for settle
- Wait for unlock from B
- Restart A
    At this point, the current unlock logic will try to unlock only if the
    node gains from unlocking, which means that the node may try to unlock
    either side. In the above scenario, each node will unlock its own side.
This test makes sure that we do NOT invalidate A's unlock transaction based
on the ContractReceiveChannelBatchUnlock caused by B's unlock.
"""
alice_app, bob_app = raiden_network
registry_address = alice_app.raiden.default_registry.address
token_address = token_addresses[0]
token_network_address = views.get_token_network_address_by_token_address(
chain_state=views.state_from_app(alice_app),
token_network_registry_address=alice_app.raiden.default_registry.address,
token_address=token_address,
)
assert token_network_address
timeout = 10
token_network = views.get_token_network_by_address(
chain_state=views.state_from_app(alice_app), token_network_address=token_network_address
)
assert token_network
channel_identifier = get_channelstate(alice_app, bob_app, token_network_address).identifier
assert (
channel_identifier
in token_network.partneraddresses_to_channelidentifiers[bob_app.raiden.address]
)
alice_to_bob_amount = 10
identifier = 1
alice_transfer_secret = Secret(sha3(alice_app.raiden.address))
alice_transfer_secrethash = sha256_secrethash(alice_transfer_secret)
bob_transfer_secret = Secret(sha3(bob_app.raiden.address))
bob_transfer_secrethash = sha256_secrethash(bob_transfer_secret)
alice_transfer_hold = bob_app.raiden.raiden_event_handler.hold_secretrequest_for(
secrethash=alice_transfer_secrethash
)
bob_transfer_hold = alice_app.raiden.raiden_event_handler.hold_secretrequest_for(
secrethash=bob_transfer_secrethash
)
alice_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=bob_app.raiden.address,
identifier=identifier,
secret=alice_transfer_secret,
)
bob_app.raiden.start_mediated_transfer_with_secret(
token_network_address=token_network_address,
amount=alice_to_bob_amount,
target=alice_app.raiden.address,
identifier=identifier + 1,
secret=bob_transfer_secret,
)
alice_transfer_hold.wait(timeout=timeout)
bob_transfer_hold.wait(timeout=timeout)
alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
alice_lock = channel.get_lock(alice_bob_channel_state.our_state, alice_transfer_secrethash)
bob_lock = channel.get_lock(alice_bob_channel_state.partner_state, bob_transfer_secrethash)
assert alice_lock
assert bob_lock
    # This is the current state of the protocol:
#
# A -> B LockedTransfer
# - protocol didn't continue
assert_synced_channel_state(
token_network_address=token_network_address,
app0=alice_app,
balance0=deposit,
pending_locks0=[alice_lock],
app1=bob_app,
balance1=deposit,
pending_locks1=[bob_lock],
)
    # A ChannelClose event will be generated; this will be polled by both
    # apps, and each must start a task for calling settle
RaidenAPI(bob_app.raiden).channel_close(
registry_address=registry_address,
token_address=token_address,
partner_address=alice_app.raiden.address,
)
# wait for the close transaction to be mined, this is necessary to compute
# the timeout for the settle
with gevent.Timeout(timeout):
waiting.wait_for_close(
raiden=alice_app.raiden,
token_network_registry_address=registry_address,
token_address=token_address,
channel_ids=[alice_bob_channel_state.identifier],
retry_timeout=alice_app.raiden.alarm.sleep_time,
)
channel_closed = raiden_state_changes_search_for_item(
bob_app.raiden,
ContractReceiveChannelClosed,
{
"canonical_identifier": {
"token_network_address": token_network_address,
"channel_identifier": alice_bob_channel_state.identifier,
}
},
)
assert isinstance(channel_closed, ContractReceiveChannelClosed)
settle_max_wait_block = BlockNumber(
channel_closed.block_number + alice_bob_channel_state.settle_timeout * 2
)
settle_timeout = BlockTimeout(
RuntimeError("settle did not happen"),
bob_app.raiden,
settle_max_wait_block,
alice_app.raiden.alarm.sleep_time,
)
with settle_timeout:
waiting.wait_for_settle(
raiden=alice_app.raiden,
token_network_registry_address=registry_address,
token_address=token_address,
channel_ids=[alice_bob_channel_state.identifier],
retry_timeout=alice_app.raiden.alarm.sleep_time,
)
with gevent.Timeout(timeout):
wait_for_batch_unlock(
app=bob_app,
token_network_address=token_network_address,
receiver=alice_bob_channel_state.partner_state.address,
sender=alice_bob_channel_state.our_state.address,
)
alice_app.start()
with gevent.Timeout(timeout):
wait_for_batch_unlock(
app=alice_app,
token_network_address=token_network_address,
receiver=alice_bob_channel_state.partner_state.address,
sender=alice_bob_channel_state.our_state.address,
)
| 38.118803
| 99
| 0.745859
|
2423ffc469cc6e0c034b440598188ef3c423d53b
| 2,912
|
py
|
Python
|
paddlenlp/datasets/paws-x.py
|
frozenfish123/PaddleNLP
|
b9c2910fb58730c8341067122c347cde5f6e7567
|
[
"Apache-2.0"
] | 1
|
2021-06-24T05:27:43.000Z
|
2021-06-24T05:27:43.000Z
|
paddlenlp/datasets/paws-x.py
|
liliustb/PaddleNLP
|
17fe183370809337d42390c0842272cef87c5c9d
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/datasets/paws-x.py
|
liliustb/PaddleNLP
|
17fe183370809337d42390c0842272cef87c5c9d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url
from paddlenlp.utils.env import DATA_HOME
from . import DatasetBuilder
__all__ = ['PAWS']
class PAWS(DatasetBuilder):
"""
PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification
    For more information please refer to `https://arxiv.org/abs/1908.11828`.
    Here we only store the simplified Chinese (zh) version.
"""
URL = "https://dataset-bj.cdn.bcebos.com/qianyan/paws-x-zh.zip"
MD5 = "f1c6f2ab8afb1f29fe04a0c929e3ab1c"
META_INFO = collections.namedtuple('META_INFO', ('file', 'md5'))
SPLITS = {
'train': META_INFO(
os.path.join('paws-x-zh', 'paws-x-zh', 'train.tsv'),
'3422ba98e5151c91bbb0a785c4873a4c'),
'dev': META_INFO(
os.path.join('paws-x-zh', 'paws-x-zh', 'dev.tsv'),
'dc163453e728cf118e17b4065d6602c8'),
'test': META_INFO(
os.path.join('paws-x-zh', 'paws-x-zh', 'test.tsv'),
'5b7320760e70559591092cb01b6f5955'),
}
def _get_data(self, mode, **kwargs):
default_root = os.path.join(DATA_HOME, self.__class__.__name__)
filename, data_hash = self.SPLITS[mode]
fullname = os.path.join(default_root, filename)
if not os.path.exists(fullname) or (data_hash and
not md5file(fullname) == data_hash):
get_path_from_url(self.URL, default_root, self.MD5)
return fullname
def _read(self, filename):
"""Reads data."""
with open(filename, 'r', encoding='utf-8') as f:
for line in f:
data = line.strip().split("\t")
if len(data) == 3:
sentence1, sentence2, label = data
yield {"sentence1": sentence1, "sentence2": sentence2, "label": label}
elif len(data) == 2:
sentence1, sentence2 = data
yield {"sentence1": sentence1, "sentence2": sentence2, "label":''}
else:
continue
def get_labels(self):
"""
Return labels of the PAWS-X object.
"""
return ["0", "1"]
| 38.315789
| 90
| 0.614011
|
e76e51314471071f031712652fd054d6aaa72604
| 3,081
|
py
|
Python
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/positioning/ipositioning.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 4
|
2016-03-30T14:31:52.000Z
|
2019-02-02T05:01:32.000Z
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/positioning/ipositioning.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 1
|
2020-03-06T04:49:42.000Z
|
2020-03-06T04:49:42.000Z
|
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/positioning/ipositioning.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | 2
|
2019-08-30T23:36:13.000Z
|
2019-11-08T16:52:01.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Positioning interfaces.
@since: 14.0
"""
from zope.interface import Attribute, Interface
class IPositioningReceiver(Interface):
"""
    An interface for receivers of positioning information.
"""
def positionReceived(latitude, longitude):
"""
Method called when a position is received.
@param latitude: The latitude of the received position.
@type latitude: L{twisted.positioning.base.Coordinate}
@param longitude: The longitude of the received position.
@type longitude: L{twisted.positioning.base.Coordinate}
"""
    def positionErrorReceived(positionError):
        """
        Method called when a position error is received.
        @param positionError: The position error.
        @type positionError: L{twisted.positioning.base.PositionError}
        """
def timeReceived(time):
"""
Method called when time and date information arrives.
@param time: The date and time (expressed in UTC unless otherwise
specified).
@type time: L{datetime.datetime}
"""
def headingReceived(heading):
"""
Method called when a true heading is received.
@param heading: The heading.
@type heading: L{twisted.positioning.base.Heading}
"""
def altitudeReceived(altitude):
"""
Method called when an altitude is received.
@param altitude: The altitude.
@type altitude: L{twisted.positioning.base.Altitude}
"""
def speedReceived(speed):
"""
Method called when the speed is received.
@param speed: The speed of a mobile object.
@type speed: L{twisted.positioning.base.Speed}
"""
def climbReceived(climb):
"""
Method called when the climb is received.
@param climb: The climb of the mobile object.
@type climb: L{twisted.positioning.base.Climb}
"""
def beaconInformationReceived(beaconInformation):
"""
Method called when positioning beacon information is received.
@param beaconInformation: The beacon information.
@type beaconInformation: L{twisted.positioning.base.BeaconInformation}
"""
class IPositioningBeacon(Interface):
"""
A positioning beacon.
"""
identifier = Attribute(
"""
        A unique identifier for this beacon. The type is dependent on the
implementation, but must be immutable.
""")
class INMEAReceiver(Interface):
"""
An object that can receive NMEA data.
"""
def sentenceReceived(sentence):
"""
Method called when a sentence is received.
@param sentence: The received NMEA sentence.
        @type sentence: L{twisted.positioning.nmea.NMEASentence}
"""
__all__ = [
"IPositioningReceiver",
"IPositioningBeacon",
"INMEAReceiver"
]
| 25.675
| 79
| 0.610841
|
71165d46e57988f8d8a8cd5c927360532e66a108
| 1,238
|
py
|
Python
|
wsgi/iportalen_django/articles/admin.py
|
I-sektionen/i-portalen
|
1713e5814d40c0da1bf3278d60a561e7d3df3550
|
[
"MIT"
] | 4
|
2016-09-21T17:06:01.000Z
|
2018-02-06T16:36:44.000Z
|
wsgi/iportalen_django/articles/admin.py
|
I-sektionen/i-portalen
|
1713e5814d40c0da1bf3278d60a561e7d3df3550
|
[
"MIT"
] | 149
|
2016-03-07T23:50:47.000Z
|
2022-03-11T23:16:33.000Z
|
wsgi/iportalen_django/articles/admin.py
|
I-sektionen/i-portalen
|
1713e5814d40c0da1bf3278d60a561e7d3df3550
|
[
"MIT"
] | 1
|
2016-03-07T23:02:06.000Z
|
2016-03-07T23:02:06.000Z
|
from django.contrib import admin
from .models import Article, OtherAttachment, ImageAttachment
from utils.admin import HiddenModelAdmin, iportalen_admin_site, iportalen_superadmin_site
class OtherAttachmentInline(admin.StackedInline):
model = OtherAttachment
readonly_fields = ('file_name', 'file')
extra = 0
class OtherAttachmentAdmin(admin.ModelAdmin):
readonly_fields = ('file_name', 'modified_by')
list_display = ('article', 'file_name')
list_filter = ('article',)
class ImageAttachmentInline(admin.TabularInline):
model = ImageAttachment
readonly_fields = ('img', 'thumbnail')
extra = 0
class ImageAttachmentAdmin(admin.ModelAdmin):
readonly_fields = ('thumbnail', 'modified_by')
list_display = ('article',)
list_filter = ('article',)
class ArticleAdmin(HiddenModelAdmin):
inlines = [OtherAttachmentInline, ImageAttachmentInline]
iportalen_admin_site.register(ImageAttachment, ImageAttachmentAdmin)
iportalen_admin_site.register(Article, ArticleAdmin)
iportalen_admin_site.register(OtherAttachment, OtherAttachmentAdmin)
iportalen_superadmin_site.register(ImageAttachment)
iportalen_superadmin_site.register(Article)
iportalen_superadmin_site.register(OtherAttachment)
| 30.95
| 89
| 0.792407
|
a07dda4dd86c42beb7862d322a1190c7ad188672
| 1,580
|
py
|
Python
|
tracker/covid_tracker/urls.py
|
mcdomx/marks-covid-tracker
|
df4cf1ba201e52d401f48c246f5f3a1b3de64752
|
[
"MIT"
] | 1
|
2020-08-07T18:19:03.000Z
|
2020-08-07T18:19:03.000Z
|
tracker/covid_tracker/urls.py
|
mcdomx/marks-covid-tracker
|
df4cf1ba201e52d401f48c246f5f3a1b3de64752
|
[
"MIT"
] | 11
|
2021-06-09T17:46:15.000Z
|
2022-03-12T00:58:34.000Z
|
tracker/covid_tracker/urls.py
|
mcdomx/marks-covid-tracker
|
df4cf1ba201e52d401f48c246f5f3a1b3de64752
|
[
"MIT"
] | null | null | null |
"""tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from .views import views
from .views import plot_state_totals
from .views import plot_state_by_county
from .views import political_affiliation
from .views import plot_top_states
urlpatterns = [
# Default route
path("", views.index_view, name="index"),
path("refresh_git", views.refresh_git, name='refresh_git'),
# path("get_states", views.get_states, name='get_states'),
#
# re_path('get_counties[\/|\?].*', views.get_counties, name='get_counties'),
re_path(r"state_chart[\/|\?].*", plot_state_totals.plot_state_chart, name='plot_state_totals'),
re_path(r"state_by_county_chart[\/|\?].*", plot_state_by_county.plot_state_by_county_chart, name='plot_state_by_county'),
re_path(r"political_affiliation[\/|\?].*", political_affiliation.plot_affiliation, name='political_affiliation'),
re_path(r"top_states[\/|\?].*", plot_top_states.plot_top_states, name='top_states'),
]
| 43.888889
| 125
| 0.725949
|
eb92efcd56b3abff794d668211660aab2bf06c18
| 170
|
py
|
Python
|
src/tarski/fstrips/hybrid/__init__.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 29
|
2018-11-26T20:31:04.000Z
|
2021-12-29T11:08:40.000Z
|
src/tarski/fstrips/hybrid/__init__.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 101
|
2018-06-07T13:10:01.000Z
|
2022-03-11T11:54:00.000Z
|
src/tarski/fstrips/hybrid/__init__.py
|
phoeft670/tarski
|
7d955e535fbbca012bfd1a12402b97febc6b35b9
|
[
"Apache-2.0"
] | 18
|
2018-11-01T22:44:39.000Z
|
2022-02-28T04:57:15.000Z
|
from .problem import HybridProblem as Problem
from ..action import Action
from .reaction import Reaction
from .differential_constraints import DifferentialConstraint
| 34
| 61
| 0.841176
|
0f57afe4795d735aa9bd8669de5a5f70635c9f1a
| 548
|
py
|
Python
|
python/clockwork/tasks/variant_call_make_jobs_tsv.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | 1
|
2020-01-10T06:43:39.000Z
|
2020-01-10T06:43:39.000Z
|
python/clockwork/tasks/variant_call_make_jobs_tsv.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | null | null | null |
python/clockwork/tasks/variant_call_make_jobs_tsv.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | null | null | null |
import os
from clockwork import db, utils, lock_file
def run(options):
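    # The lock file presumably guards against concurrent variant_call runs
    # over the same pipeline_root (an assumption based on the name; see
    # clockwork.lock_file for the actual semantics).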
lock = lock_file.LockFile(os.path.join(options.pipeline_root, 'variant_call.lock'))
database = db.Db(options.db_config_file)
database.make_variant_call_or_mykrobe_jobs_tsv(
'variant_call',
options.outfile,
options.pipeline_root,
options.reference_id,
options.reference_root,
pipeline_version=options.pipeline_version,
dataset_name=options.dataset_name,
)
database.commit_and_close()
lock.stop()
| 28.842105
| 87
| 0.709854
|
901679fad9a143adabd552efc1da4313ed599978
| 2,465
|
py
|
Python
|
util/build_json.py
|
supernifty/wordnerd
|
7433e6b5d8bb35c4fe56fead3f4b4fb4d9e349af
|
[
"MIT"
] | null | null | null |
util/build_json.py
|
supernifty/wordnerd
|
7433e6b5d8bb35c4fe56fead3f4b4fb4d9e349af
|
[
"MIT"
] | null | null | null |
util/build_json.py
|
supernifty/wordnerd
|
7433e6b5d8bb35c4fe56fead3f4b4fb4d9e349af
|
[
"MIT"
] | null | null | null |
import json
import sys
# builds a JSON word/definition database from the WordNet dict files, writes to data.json
result = dict()
jsondata = {'words': [], 'definitions': []}
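# Output shape written to data.json:
#   {"words":       [{"word": ..., "id": ..., "len": ...}, ...],
#    "definitions": [{"word_id": ..., "def": "<pos>. <gloss>"}, ...]}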
word_id = 0
def process( index_filename, data_filename ):
global result
global word_id
global jsondata
data = dict()
idx_file = open('wordnet/dict/%s' % index_filename )
data_file = open('wordnet/dict/%s' % data_filename )
print("%s: reading data..." % data_filename)
data_lines = data_file.readlines()
print("%s: processing data..." % data_filename)
count_def = 0
for line in data_lines:
if line.startswith( ' ' ):
continue
line = line.strip()
gloss_fields = line.split( ' | ' )
gloss = gloss_fields[1]
fields = gloss_fields[0].split( ' ' )
data[ fields[0] ] = dict()
data[ fields[0] ][ 'gloss' ] = gloss
count_def += 1
print("%s: %i definitions" % ( data_filename, count_def ))
idx_lines = idx_file.readlines()
print("%s: processing index..." % index_filename)
count_word = 0
count_link = 0
for line in idx_lines:
if line.startswith( ' ' ):
continue
line = line.strip()
fields = line.split( ' ' )
word = fields[0].replace( '_', ' ' )
type = fields[1]
if word not in result:
#cursor.execute( 'insert into words values ( null, ?, 1 )', (word,) )
result[word] = word_id #cursor.lastrowid
jsondata['words'].append({'word': word, 'id': word_id, 'len': len(word)})
word_id += 1
#result[word]['type'] = type
count_word += 1
pointers = int( fields[3] )
synsets = fields[ 6+pointers: ]
# find synsets
for synset in synsets:
if synset in data:
#cursor.execute( 'insert into defs values ( ?, ? )', (result[word], "%s. %s" % ( type, data[synset]['gloss'] ),) )
jsondata['definitions'].append({ 'word_id': result[word], 'def': "%s. %s" % ( type, data[synset]['gloss'] )})
count_link += 1
else:
print("failed to find synset %s for word %s" % ( synset, word ))
print("%s: %i words with %i links" % ( index_filename, count_word, count_link ))
def synonyms( f ):
print("skipping synonyms")
process( 'index.adj', 'data.adj' )
process( 'index.adv', 'data.adv' )
process( 'index.noun', 'data.noun' )
process( 'index.verb', 'data.verb' )
# write json
print("writing...")
with open('data.json', 'w') as fh:
json.dump(jsondata, fh)
print("done")
| 31.602564
| 123
| 0.585396
|
b5690332d31c1d08d73a322c61797181cc23a839
| 193
|
py
|
Python
|
src/division.py
|
dashsanjay/calculationEngine
|
9b7e57a15698e7c07961d178f65d9bd76e1afb78
|
[
"MIT"
] | null | null | null |
src/division.py
|
dashsanjay/calculationEngine
|
9b7e57a15698e7c07961d178f65d9bd76e1afb78
|
[
"MIT"
] | 1
|
2021-11-05T18:14:31.000Z
|
2021-11-06T05:59:17.000Z
|
src/division.py
|
dashsanjay/calculationEngine
|
9b7e57a15698e7c07961d178f65d9bd76e1afb78
|
[
"MIT"
] | 4
|
2021-11-04T04:20:18.000Z
|
2021-11-05T13:42:30.000Z
|
import math
def div(a, b):
    # This program divides two numbers and returns the result
    if b == 0:
        return "Error"  # if the denominator is 0, return an error string
result = a/b
return result
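# Example results (illustrative):
#   div(10, 4) -> 2.5
#   div(1, 0)  -> "Error"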
| 24.125
| 61
| 0.673575
|
0dadf09a19da3bcb93c120d662188d10200647b6
| 772
|
py
|
Python
|
tensorflow_federated/python/learning/metrics/__init__.py
|
tensorflow/federated
|
5748b1a7dc4a5be3b2b9da9959eabe586347078a
|
[
"Apache-2.0"
] | 1,918
|
2019-02-22T21:17:28.000Z
|
2022-03-30T14:49:53.000Z
|
tensorflow_federated/python/learning/metrics/__init__.py
|
tensorflow/federated
|
5748b1a7dc4a5be3b2b9da9959eabe586347078a
|
[
"Apache-2.0"
] | 999
|
2019-02-22T21:47:44.000Z
|
2022-03-31T11:06:42.000Z
|
tensorflow_federated/python/learning/metrics/__init__.py
|
tensorflow/federated
|
5748b1a7dc4a5be3b2b9da9959eabe586347078a
|
[
"Apache-2.0"
] | 498
|
2019-02-22T21:17:56.000Z
|
2022-03-29T02:54:15.000Z
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for working with metrics in Federated Learning algorithms."""
from tensorflow_federated.python.learning.metrics.finalizer import create_keras_metric_finalizer
| 45.411765
| 96
| 0.784974
|
5df6f26c4e0f21ab4ae81b15b4866ca9ad4b0dd8
| 619
|
bzl
|
Python
|
helpers/k8s.bzl
|
digital-peace-talks/argument-analysis-research
|
587b52bedb79a0c9497b8c39ccc70edf4d165249
|
[
"MIT"
] | null | null | null |
helpers/k8s.bzl
|
digital-peace-talks/argument-analysis-research
|
587b52bedb79a0c9497b8c39ccc70edf4d165249
|
[
"MIT"
] | null | null | null |
helpers/k8s.bzl
|
digital-peace-talks/argument-analysis-research
|
587b52bedb79a0c9497b8c39ccc70edf4d165249
|
[
"MIT"
] | null | null | null |
_TEMPLATE = "//k8s:deploy.yaml"
def _template_manifest_impl(ctx):
name = '{}'.format(ctx.label).replace("//cmd/", "").replace("/", "-").split(":", 1)[0]
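    # e.g. the label //cmd/foo/bar:image becomes "foo-bar": strip the
    # "//cmd/" prefix, replace "/" with "-", then drop everything from the
    # first ":" (the example label is illustrative).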
ctx.actions.expand_template(
template = ctx.file.template,
output = ctx.outputs.source_file,
substitutions = {
"{NAME}": name,
},
)
template_manifest = rule(
implementation = _template_manifest_impl,
attrs = {
"template": attr.label(
default = Label(_TEMPLATE),
allow_single_file = True,
),
},
outputs = {"source_file": "%{name}.yaml"},
)
def template_image(ctx, *args, **kwargs):
print(ctx, args, kwargs)
| 24.76
| 88
| 0.621971
|
262e9f1bf454028942f80a8e6ca5d5ba0b8f50fb
| 24,006
|
py
|
Python
|
test/test_os_file_management.py
|
Wolkabout/WolkConnect-Python-
|
11412e3f88911170f587b5e857d07ab41c8f52b5
|
[
"Apache-2.0"
] | 6
|
2016-12-19T13:36:44.000Z
|
2018-05-10T15:08:15.000Z
|
test/test_os_file_management.py
|
Wolkabout/WolkConnect-Python
|
11412e3f88911170f587b5e857d07ab41c8f52b5
|
[
"Apache-2.0"
] | 5
|
2019-02-23T09:37:12.000Z
|
2021-09-17T13:54:58.000Z
|
test/test_os_file_management.py
|
Wolkabout/WolkConnect-Python-
|
11412e3f88911170f587b5e857d07ab41c8f52b5
|
[
"Apache-2.0"
] | 3
|
2016-08-15T22:19:00.000Z
|
2017-12-28T09:48:37.000Z
|
"""Tests for OSFileManagement."""
# Copyright 2020 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import unittest
from tempfile import NamedTemporaryFile
from threading import Timer
from unittest.mock import MagicMock
sys.path.append("..") # noqa
from wolk.os_file_management import OSFileManagement
from wolk.model.file_management_error_type import FileManagementErrorType
from wolk.model.file_management_status import FileManagementStatus
from wolk.model.file_management_status_type import FileManagementStatusType
from wolk.model.file_transfer_package import FileTransferPackage
class TestOSFileManagement(unittest.TestCase):
"""Tests for OSFileManagement class."""
def test_configure_no_existing_folder(self):
"""Test configuring file management module and create files folder."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
preferred_package_size = 1000
max_file_size = 1000000
file_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "files"
)
file_management.configure(
preferred_package_size, max_file_size, file_directory
)
self.assertTrue(os.path.exists(file_directory))
os.rmdir(file_directory)
def test_configure_existing_folder(self):
"""Test configuring file management module with existing folder."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
preferred_package_size = 1000
max_file_size = 1000000
file_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "files"
)
os.makedirs(os.path.abspath(file_directory))
file_management.configure(
preferred_package_size, max_file_size, file_directory
)
self.assertTrue(os.path.exists(file_directory))
os.rmdir(file_directory)
def test_set_custom_url_downloader(self):
"""Test setting custom URL downloader."""
downloader = True
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.set_custom_url_downloader(downloader)
self.assertEqual(downloader, file_management.download_url)
def test_handle_upload_initiation_not_idle_state(self):
"""Test handle upload initiation when module not idle."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "file"
file_size = 1024
file_hash = "some_hash"
file_management.current_status = 1 # Not None
file_management.handle_upload_initiation(
file_name, file_size, file_hash
)
file_management.status_callback.assert_not_called()
def test_handle_upload_initiation_unconfigured(self):
"""Test handle upload initiation when module not configured."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "file"
file_size = 1024
file_hash = "some_hash"
file_management.handle_upload_initiation(
file_name, file_size, file_hash
)
expected_status = FileManagementStatus(
FileManagementStatusType.ERROR,
FileManagementErrorType.TRANSFER_PROTOCOL_DISABLED,
)
file_management.status_callback.assert_called_once_with(
file_name, expected_status
)
def test_handle_upload_initiation_file_too_big(self):
"""Test handle upload initiation when file too big."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
preferred_package_size = 256
max_file_size = 512
file_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "files"
)
os.makedirs(os.path.abspath(file_directory))
file_management.configure(
preferred_package_size, max_file_size, file_directory
)
file_name = "file"
file_size = 1024
file_hash = "some_hash"
file_management.handle_upload_initiation(
file_name, file_size, file_hash
)
expected_status = FileManagementStatus(
FileManagementStatusType.ERROR,
FileManagementErrorType.UNSUPPORTED_FILE_SIZE,
)
file_management.status_callback.assert_called_once_with(
file_name, expected_status
)
os.rmdir(file_directory)
def test_handle_upload_initiation_valid_file(self):
"""Test handle upload initiation for valid file."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
preferred_package_size = 256
max_file_size = 1024
file_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "files"
)
os.makedirs(os.path.abspath(file_directory))
file_management.configure(
preferred_package_size, max_file_size, file_directory
)
file_name = "file"
file_size = 512
file_hash = "some_hash"
file_management.handle_upload_initiation(
file_name, file_size, file_hash
)
expected_status = FileManagementStatus(
FileManagementStatusType.FILE_TRANSFER
)
file_management.status_callback.assert_called_once_with(
file_name, expected_status
)
os.rmdir(file_directory)
file_management.request_timeout.cancel()
file_management.temp_file.close()
def test_handle_upload_initiation_small_file(self):
"""Test handle upload initiation for small file."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
preferred_package_size = 512
max_file_size = 1024
file_directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "files"
)
os.makedirs(os.path.abspath(file_directory))
file_management.configure(
preferred_package_size, max_file_size, file_directory
)
file_name = "file"
file_size = 256
file_hash = "some_hash"
file_management.handle_upload_initiation(
file_name, file_size, file_hash
)
file_management.packet_request_callback.assert_called_once_with(
file_name, 0, file_size + 64
)
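        # the extra 64 bytes presumably account for the two 32-byte hashes
        # framing each packet (previous hash + data + current hash, cf. the
        # FileTransferPackage usage further below)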
os.rmdir(file_directory)
file_management.request_timeout.cancel()
file_management.temp_file.close()
def test_handle_abort_with_temp_file(self):
"""Test aborting file transfer with temp file set."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.temp_file = NamedTemporaryFile(
mode="a+b", delete=False
)
file_management.handle_file_upload_abort()
self.assertIsNone(file_management.temp_file)
def test_handle_abort(self):
"""Test aborting file transfer."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.handle_file_upload_abort()
self.assertIsNone(file_management.temp_file)
def test_file_package_binary_idle_state(self):
"""Test receiving file package when in idle state."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.logger.warning = MagicMock()
file_transfer_package = FileTransferPackage(b"", b"", b"")
file_management.handle_file_binary_response(file_transfer_package)
file_management.logger.warning.assert_called_once()
def test_file_package_binary_cancel_timeout(self):
"""Test receiving file package cancels request timeout timer."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.current_status = True
file_management.retry_count = 0
file_management.request_timeout = Timer(60.0, file_management._timeout)
file_management.request_timeout.cancel = MagicMock()
file_transfer_package = FileTransferPackage(b"", b"", b"")
file_management.handle_file_binary_response(file_transfer_package)
file_management.request_timeout.cancel()
file_management.packet_request_callback.assert_called()
# def test_file_package_binary(self):
# """Test receiving file package cancels request timeout timer."""
# mock_status_callback = MagicMock(return_value=None)
# mock_packet_request_callback = MagicMock(return_value=None)
# mock_url_status_callback = MagicMock(return_value=None)
# file_management = OSFileManagement(
# mock_status_callback,
# mock_packet_request_callback,
# mock_url_status_callback,
# )
# file_management.logger.setLevel(logging.CRITICAL)
# file_management.current_status = True
# file_management.retry_count = 0
# data = b"Let's try something else"
# current_hash = hashlib.sha256(data).digest()
# hashlib.sha256 = MagicMock(return_value=b"")
# file_transfer_package = FileTransferPackage(
# 32 * b"\x00", data, current_hash
# )
# file_management.handle_file_binary_response(file_transfer_package)
# file_management.request_timeout.cancel()
# file_management.packet_request_callback.assert_called()
def test_handle_file_url_download_abort(self):
"""Test method resets state."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.handle_file_url_download_abort()
self.assertIsNone(file_management.current_status)
def test_get_file_list_current_dir(self):
"""Test get file list for running in current directory."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_list = file_management.get_file_list()
self.assertNotEqual(0, len(file_list))
def test_get_file_list_empty_dir(self):
"""Test get file list for running in current directory."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
os.mkdir("test_dir")
file_management.file_directory = "test_dir"
file_management.logger.setLevel(logging.CRITICAL)
file_list = file_management.get_file_list()
self.assertEqual(0, len(file_list))
os.rmdir("test_dir")
def test_get_file_path_existing(self):
"""Test get file path for existing file."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "test_file"
file_handle = open(file_name, "w")
file_handle.close()
file_path = file_management.get_file_path(file_name)
expected_file_path = os.path.join(
os.path.abspath(os.getcwd()), file_name
)
self.assertEqual(expected_file_path, file_path)
os.remove(file_name)
def test_get_file_path_non_existing(self):
"""Test get file path for non_existing file."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "test_file"
file_path = file_management.get_file_path(file_name)
self.assertIsNone(file_path)
def test_handle_file_list_confirm_does_nothing(self):
"""Test file list confirm doesn't call status callbacks."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.handle_file_list_confirm()
file_management.status_callback.assert_not_called()
def test_handle_file_delete_non_existing(self):
"""Test deleting file that doesn't exist."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "test_file"
file_management.handle_file_delete(file_name)
self.assertFalse(os.path.exists(os.path.join(os.getcwd(), file_name)))
def test_handle_file_delete_existing(self):
"""Test deleting file that exists."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_name = "test_file"
file_handle = open(file_name, "w")
file_handle.close()
file_management.handle_file_delete(file_name)
self.assertFalse(os.path.exists(os.path.join(os.getcwd(), file_name)))
def test_handle_file_purge(self):
"""Test deleting all regular files in a directory."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_names = ["file1", "file2", ".special-file"]
files_directory = "test_dir"
os.mkdir(files_directory)
directory_path = os.path.join(os.getcwd(), files_directory)
for file in file_names:
file_handle = open(os.path.join(directory_path, file), "w")
file_handle.close()
file_management.file_directory = directory_path
file_management.handle_file_purge()
self.assertEqual(1, len(os.listdir(directory_path)))
os.remove(os.path.join(directory_path, ".special-file"))
os.rmdir(files_directory)
def test_timeout(self):
"""Test timeout calls abort."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.handle_file_upload_abort = MagicMock()
file_management._timeout()
file_management.handle_file_upload_abort.assert_called_once()
def test_handle_file_url_download_init_not_idle(self):
"""Test URL upload init when not in idle state."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.logger.warning = MagicMock()
file_management.current_status = True
file_management.handle_file_url_download_initiation("some_url")
file_management.logger.warning.assert_called_once()
def test_handle_file_url_download_init_invalid_url(self):
"""Test URL upload init for invalid url."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
file_management.logger.setLevel(logging.CRITICAL)
file_management.logger.error = MagicMock()
file_management.handle_file_url_download_initiation("some_url")
file_management.logger.error.assert_called_once()
def test_handle_file_url_download_init_valid_url(self):
"""Test URL upload init for valid url."""
mock_status_callback = MagicMock(return_value=None)
mock_packet_request_callback = MagicMock(return_value=None)
mock_url_status_callback = MagicMock(return_value=None)
file_management = OSFileManagement(
mock_status_callback,
mock_packet_request_callback,
mock_url_status_callback,
)
os.mkdir("test_dir")
file_management.file_directory = "test_dir"
file_management.logger.setLevel(logging.CRITICAL)
file_management.logger.error = MagicMock()
licence_url = (
"https://raw.githubusercontent.com/Wolkabout"
+ "/WolkConnect-Python/master/LICENSE"
)
file_management.handle_file_url_download_initiation(licence_url)
status = FileManagementStatus(FileManagementStatusType.FILE_READY)
file_management.url_status_callback.assert_called_with(
licence_url, status, "LICENSE"
)
os.remove(os.path.join(os.getcwd(), "test_dir", "LICENSE"))
os.rmdir(os.path.join(os.getcwd(), "test_dir"))
| 38.907618
| 79
| 0.691994
|
434ff1e7ea6a21ec8114ecdab165b6ad70a88197
| 7,581
|
py
|
Python
|
webapp/graphite/settings.py
|
graphite-server/graphite-web
|
ce985a1be41a26054e44a2d9d39047d6db6cb710
|
[
"Apache-2.0"
] | 1
|
2016-12-20T13:42:47.000Z
|
2016-12-20T13:42:47.000Z
|
webapp/graphite/settings.py
|
graphite-server/graphite-web
|
ce985a1be41a26054e44a2d9d39047d6db6cb710
|
[
"Apache-2.0"
] | null | null | null |
webapp/graphite/settings.py
|
graphite-server/graphite-web
|
ce985a1be41a26054e44a2d9d39047d6db6cb710
|
[
"Apache-2.0"
] | null | null | null |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
import os
import sys
from os.path import abspath, dirname, join
from warnings import warn
from django.core.urlresolvers import reverse_lazy
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False
# Setting wsgi application object
WSGI_APPLICATION = 'graphite.wsgi.application'
# Filesystem layout
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
# Initialize additional path variables
# Defaults for these are set after local_settings is imported
STATIC_ROOT = ''
STATIC_URL = '/static/'
URL_PREFIX = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []
# Cluster settings
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_EXCLUDE_LOCAL = False
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX = 'carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2
#Remote rendering settings
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False
#Miscellaneous settings
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = (
'graphite.finders.standard.StandardFinder',
)
#Authentication settings
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)" For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None
#Set this to True to delegate authentication to the web server
USE_REMOTE_USER_AUTHENTICATION = False
REMOTE_USER_BACKEND = "" # Provide an alternate or subclassed backend
# Django 1.5 requires this so we set a default but warn the user
SECRET_KEY = 'UNSAFE_DEFAULT'
# Django 1.5 requires this to be set. Here we default to prior behavior and allow all
ALLOWED_HOSTS = [ '*' ]
# Override to link a different URL for login (e.g. for django_openid_auth)
LOGIN_URL = reverse_lazy('account_login')
# Set the default timezone to UTC
TIME_ZONE = 'UTC'
# Set to True to require authentication to save or delete dashboards
DASHBOARD_REQUIRE_AUTHENTICATION = False
# Require Django change/delete permissions to save or delete dashboards.
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_PERMISSIONS = False
# Name of a group to which the user must belong to save or delete dashboards. Alternative to
# DASHBOARD_REQUIRE_PERMISSIONS, particularly useful when using only LDAP (without Admin app)
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_EDIT_GROUP = None
DATABASES = None
# If using rrdcached, set to the address or socket of the daemon
FLUSHRRDCACHED = ''
## Load our local_settings
try:
from graphite.local_settings import * # noqa
except ImportError:
print >> sys.stderr, "Could not import graphite.local_settings, using defaults!"
## Load Django settings if they werent picked up in local_settings
if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
from graphite.app_settings import * # noqa
## Set config dependent on flags set in local_settings
# Path configuration
if not STATIC_ROOT:
STATIC_ROOT = join(GRAPHITE_ROOT, 'static')
if not CONF_DIR:
CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
RRD_DIR = join(STORAGE_DIR, 'rrd/')
if not STANDARD_DIRS:
try:
import whisper # noqa
if os.path.exists(WHISPER_DIR):
STANDARD_DIRS.append(WHISPER_DIR)
except ImportError:
print >> sys.stderr, "WARNING: whisper module could not be loaded, whisper support disabled"
try:
import rrdtool # noqa
if os.path.exists(RRD_DIR):
STANDARD_DIRS.append(RRD_DIR)
except ImportError:
pass
if DATABASES is None:
DATABASES = {
'default': {
'NAME': join(STORAGE_DIR, 'graphite.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
# Handle URL prefix in static files handling
if URL_PREFIX and not STATIC_URL.startswith(URL_PREFIX):
STATIC_URL = '/{0}{1}'.format(URL_PREFIX.strip('/'), STATIC_URL)
# Default sqlite db file
# This is set here so that a user-set STORAGE_DIR is available
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
and not DATABASES.get('default',{}).get('NAME'):
DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')
# Caching shortcuts
if MEMCACHE_HOSTS:
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': MEMCACHE_HOSTS,
'TIMEOUT': DEFAULT_CACHE_DURATION,
'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
}
# Authentication shortcuts
if USE_LDAP_AUTH and LDAP_URI is None:
LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION or REMOTE_USER_BACKEND:
MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
if REMOTE_USER_BACKEND:
AUTHENTICATION_BACKENDS.insert(0,REMOTE_USER_BACKEND)
else:
AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.ModelBackend')
AUTHENTICATION_BACKENDS.insert(0,'django_auth_ldap.backend.LDAPBackend')
if SECRET_KEY == 'UNSAFE_DEFAULT':
warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
USE_TZ = True
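# Illustrative override example (not part of this file): any of the defaults
# above can be replaced from graphite/local_settings.py, which is imported
# near the top of this module. A minimal sketch:
#
#     # graphite/local_settings.py
#     SECRET_KEY = 'replace-with-a-long-random-string'
#     TIME_ZONE = 'Europe/Berlin'
#     MEMCACHE_HOSTS = ['127.0.0.1:11211']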
| 32.122881
| 109
| 0.758211
|
f5878773f846efd53f3c21d85b6c86a0a0c4ba01
| 1,212
|
py
|
Python
|
userbot/plugins/gdaftrnoon.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
userbot/plugins/gdaftrnoon.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | 1
|
2022-01-09T11:35:06.000Z
|
2022-01-09T11:35:06.000Z
|
userbot/plugins/gdaftrnoon.py
|
aksr-aashish/FIREXUSERBOT
|
dff0b7bf028cb27779626ce523402346cc990402
|
[
"MIT"
] | null | null | null |
from . import *
@bot.on(admin_cmd(pattern="gdaftrnoon(.*)"))
async def xd(event):
await event.edit("Sending To all Group good AfterNoon")
event.pattern_match.group(1)
async for tele in borg.iter_dialogs():
if tele.is_group:
chat = tele.id
lol = 0
done = 0
try:
await bot.send_message(
chat,
'╭━━━┳━━━┳━━━┳━━━╮\n┃╭━╮┃╭━╮┃╭━╮┣╮╭╮┃\n┃┃╱╰┫┃╱┃┃┃╱┃┃┃┃┃┃\n┃┃╭━┫┃╱┃┃┃╱┃┃┃┃┃┃\n┃╰┻━┃╰━╯┃╰━╯┣╯╰╯┃\n╰━━━┻━━━┻━━━┻━━━╯\n╭━━━╮\n┃╭━╮┃\n┃┃╱┃┃\n┃╰━╯┃\n┃╭━╮┃\n╰╯╱╰╯\n╭━━━╮\n┃╭━━╯\n┃╰━━╮\n┃╭━━╯\n┃┃\n╰╯\n╭━━━━╮\n┃╭╮╭╮┃\n╰╯┃┃╰╯\n╱╱┃┃\n╱╱┃┃\n╱╱╰╯\n╭━━━╮\n┃╭━━╯\n┃╰━━╮\n┃╭━━╯\n┃╰━━╮\n╰━━━╯\n╭━━━╮\n┃╭━╮┃\n┃╰━╯┃\n┃╭╮╭╯\n┃┃┃╰╮\n╰╯╰━╯\n╭━╮╱╭╮\n┃┃╰╮┃┃\n┃╭╮╰╯┃\n┃┃╰╮┃┃\n┃┃╱┃┃┃\n╰╯╱╰━╯\n╭━━━╮\n┃╭━╮┃\n┃┃╱┃┃\n┃┃╱┃┃\n┃╰━╯┃\n╰━━━╯\n╭━━━╮\n┃╭━╮┃\n┃┃╱┃┃\n┃┃╱┃┃\n┃╰━╯┃\n╰━━━╯\n╭━╮╱╭╮\n┃┃╰╮┃┃\n┃╭╮╰╯┃\n┃┃╰╮┃┃\n┃┃╱┃┃┃\n╰╯╱╰━╯',
)
done += 1
except:
lol += 1
await event.reply('#Smoothest & Fastest [FIRE-X](https://t.me/FirexSupport)')
CmdHelp("gdaftrnoon").add_command(
"gdaftrnoon", None, "Wishs Good Afternoon in all groups just one command"
).add()
| 43.285714
| 519
| 0.356436
|
97af59d482d0997a9e5f45eb6466d03db9abe375
| 3,360
|
py
|
Python
|
tests/test_parser.py
|
wenisman/grafyaml
|
26e53da91426896d5877146fe3ec339a61ede8da
|
[
"Apache-2.0"
] | 8
|
2020-10-07T14:24:24.000Z
|
2022-03-13T16:37:55.000Z
|
tests/test_parser.py
|
wenisman/grafyaml
|
26e53da91426896d5877146fe3ec339a61ede8da
|
[
"Apache-2.0"
] | 1
|
2022-03-14T22:49:23.000Z
|
2022-03-14T22:49:23.000Z
|
tests/test_parser.py
|
wenisman/grafyaml
|
26e53da91426896d5877146fe3ec339a61ede8da
|
[
"Apache-2.0"
] | 5
|
2021-03-21T18:54:21.000Z
|
2022-02-15T22:59:53.000Z
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from testtools import TestCase
from grafana_dashboards import parser
class TestCaseParser(TestCase):
def setUp(self):
super(TestCaseParser, self).setUp()
self.parser = parser.YamlParser()
def test_get_dashboard_empty(self):
self._get_empty_dashboard("foobar")
def test_parse_multiple(self):
path = os.path.join(
os.path.dirname(__file__), "fixtures/parser/dashboard-0001.yaml"
)
self.parser.parse(path)
dashboard = {
"foobar": {
"rows": [],
"templating": {
"enabled": False,
"list": [],
},
"timezone": "utc",
"title": "foobar",
},
"new-dashboard": {
"rows": [],
"templating": {
"enabled": False,
"list": [],
},
"timezone": "utc",
"title": "New dashboard",
},
}
# Get parsed dashboard
res, md5 = self.parser.get_dashboard("new-dashboard")
self.assertEqual(res, dashboard["new-dashboard"])
# Check for a dashboard that does not exist
self._get_empty_dashboard("foobar")
# Parse another file to ensure we are appending data.
path = os.path.join(
os.path.dirname(__file__), "fixtures/parser/dashboard-0002.yaml"
)
self.parser.parse(path)
res, md5 = self.parser.get_dashboard("foobar")
self.assertEqual(res, dashboard["foobar"])
# Ensure our first dashboard still exists.
res, md5 = self.parser.get_dashboard("new-dashboard")
self.assertEqual(res, dashboard["new-dashboard"])
def test_parse_duplicate(self):
path = os.path.join(
os.path.dirname(__file__), "fixtures/parser/dashboard-0001.yaml"
)
self.parser.parse(path)
dashboard = {
"new-dashboard": {
"rows": [],
"templating": {
"enabled": False,
"list": [],
},
"timezone": "utc",
"title": "New dashboard",
},
}
# Get parsed dashboard
res, md5 = self.parser.get_dashboard("new-dashboard")
self.assertEqual(res, dashboard["new-dashboard"])
path = os.path.join(
os.path.dirname(__file__), "fixtures/parser/dashboard-0003.yaml"
)
# Fail to parse duplicate dashboard
self.assertRaises(Exception, self.parser.parse, path)
def _get_empty_dashboard(self, name):
res, md5 = self.parser.get_dashboard(name)
self.assertEqual(res, None)
| 31.698113
| 76
| 0.56369
|
2ef0a118b29c2d6172f5cbbf76ae21a64df1a657
| 311
|
py
|
Python
|
pyxif/__init__.py
|
zenwerk/Pyxif
|
775f9329881368fed83e8c8b1a45321e627da40f
|
[
"MIT"
] | 1
|
2022-02-15T11:32:18.000Z
|
2022-02-15T11:32:18.000Z
|
pyxif/__init__.py
|
zenwerk/Pyxif
|
775f9329881368fed83e8c8b1a45321e627da40f
|
[
"MIT"
] | null | null | null |
pyxif/__init__.py
|
zenwerk/Pyxif
|
775f9329881368fed83e8c8b1a45321e627da40f
|
[
"MIT"
] | null | null | null |
from ._remove import remove
from ._load_and_dump import load, dump, ImageGroup, PhotoGroup, GPSInfoGroup
from ._transplant import transplant
from ._insert import insert
try:
from ._thumbnail import thumbnail
except ImportError:
print("'thumbnail' function depends on PIL or Pillow.")
VERSION = '0.4.3'
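# Usage note (illustrative, not part of the original module): the imports above
# flatten the package API, so callers write e.g. `pyxif.load(...)` or
# `pyxif.remove(...)` instead of importing the private `_load_and_dump` and
# `_remove` modules; see those modules for the exact signatures.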
| 28.272727
| 76
| 0.778135
|
9ae5d8a6206320c841ce0a1490c09f5fc67db299
| 4,723
|
py
|
Python
|
encode/mpeg-pcc-tmc13/encode_wrapper.py
|
haiqwang/pcb
|
84eeb5ed9c758fd4a2f94c29ab8698ca84c73aea
|
[
"Apache-2.0"
] | 1
|
2021-04-25T01:39:26.000Z
|
2021-04-25T01:39:26.000Z
|
encode/mpeg-pcc-tmc13/encode_wrapper.py
|
haiqwang/pcb
|
84eeb5ed9c758fd4a2f94c29ab8698ca84c73aea
|
[
"Apache-2.0"
] | null | null | null |
encode/mpeg-pcc-tmc13/encode_wrapper.py
|
haiqwang/pcb
|
84eeb5ed9c758fd4a2f94c29ab8698ca84c73aea
|
[
"Apache-2.0"
] | 1
|
2021-04-26T12:04:29.000Z
|
2021-04-26T12:04:29.000Z
|
'''
Python wrapper to encode with MPEG G-PCC standard
Author: Haiqiang Wang
Date: 04/22/2021
'''
import os
import re
import subprocess
def make_cfg(gpcc_bin_path, ref_path, cfg_dir, output_dir, g, c):
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
    src_name = re.split(r'/|\.', ref_path)[-2]  # raw string avoids an invalid escape warning
recon_name = '{src}_g_{g}_c_{c}'.format(src=src_name, g=g, c=c)
recon_path = os.path.join(output_dir, '{}.ply'.format(recon_name))
bin_path = os.path.join(output_dir, '{}.bin'.format(recon_name))
log_path = os.path.join(output_dir, '{}.log'.format(recon_name))
cfg_path = os.path.join(cfg_dir, '{}.cfg'.format(recon_name))
rst = []
rst.append('uncompressedDataPath: {}'.format(ref_path))
rst.append('reconstructedDataPath: {}'.format(recon_path))
rst.append('compressedStreamPath: {}'.format(bin_path))
rst.append('mode: 0')
rst.append('trisoupNodeSizeLog2: 0')
rst.append('mergeDuplicatedPoints: 1')
rst.append('neighbourAvailBoundaryLog2: 8')
rst.append('intra_pred_max_node_size_log2: 6')
rst.append('srcResolution: 0')
rst.append('outputResolution: 0')
rst.append('maxNumQtBtBeforeOt: 4')
rst.append('minQtbtSizeLog2: 0')
rst.append('planarEnabled: 1')
rst.append('planarModeIdcmUse: 0')
rst.append('convertPlyColourspace: 1')
rst.append('transformType: 2')
rst.append('numberOfNearestNeighborsInPrediction: 3')
rst.append('levelOfDetailCount: 11')
rst.append('lodDecimator: 0')
rst.append('adaptivePredictionThreshold: 64')
rst.append('qpChromaOffset: 0')
rst.append('bitdepth: 8')
rst.append('positionQuantizationScale: {}'.format(g))
rst.append('qp: {}'.format(c))
rst.append('attribute: color')
with open(cfg_path, 'w') as f:
for line in rst:
f.write("%s\n" % line)
cmd = "{exec_path} --config={cfg_path} >> {log_path}".format(exec_path=gpcc_bin_path, cfg_path=cfg_path, log_path=log_path)
# print(cmd)
return cmd
def process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq, g, c):
cmd = []
for _seq in seq:
ref_path = os.path.join(ref_dir, _seq)
for _g in g:
for _c in c:
_cmd = make_cfg(gpcc_bin_path, ref_path, cfg_dir, output_dir, _g, _c)
cmd.append(_cmd)
return cmd
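# Sizing note (illustrative, not from the original script): each call expands
# the full cartesian product of sequences x geometry scales x QPs, so e.g. the
# vox10 set below (5 sequences, 7 scales, 7 QPs) yields 5 * 7 * 7 = 245 cfg
# files and encoder commands.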
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
gpcc_bin_path = os.path.abspath(os.path.join(dir_path, '../../mpeg-pcc-tmc13/build/tmc3/tmc3'))
ref_dir = os.path.abspath(os.path.join(dir_path, '../../data/mpeg/ref/'))
cfg_dir = os.path.abspath(os.path.join(dir_path, './cfg'))
output_dir = os.path.abspath(os.path.join(dir_path, './ply'))
seq_15 = []
g_15 = [1.0, 1.0/512, 1.0/256, 1.0/64, 1.0/32, 1.0/8, 1.0/4]
seq_14 = []
g_14 = [1.0, 1.0/256, 1.0/128, 1.0/64, 1.0/16, 1.0/8, 1.0/4]
seq_13 = []
g_13 = [1.0, 1.0/64, 1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0/2]
seq_12 = ['boxer_viewdep_vox12.ply',
'Thaidancer_viewdep_vox12.ply']
g_12 = [1.0, 1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0/2, 3.0/4]
seq_11 = ['basketball_player_vox11_00000200.ply',
'dancer_vox11_00000001.ply']
g_11 = [1.0, 1.0/16, 1.0/8, 1.0/4, 1.0/2, 3.0/4, 7.0/8]
seq_10 = ['queen_0200.ply',
'soldier_vox10_0690.ply',
'redandblack_vox10_1550.ply',
'loot_vox10_1200.ply',
'longdress_vox10_1300.ply']
g_10 = [1.0, 1.0/8, 1.0/4, 1.0/2, 3.0/4, 7.0/8, 15.0/16]
c = [4, 22, 28, 34, 40, 46, 51]
cmd_all = []
if len(seq_15) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_15, g_15, c)
cmd_all.extend(cmd)
if len(seq_14) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_14, g_14, c)
cmd_all.extend(cmd)
if len(seq_13) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_13, g_13, c)
cmd_all.extend(cmd)
if len(seq_12) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_12, g_12, c)
cmd_all.extend(cmd)
if len(seq_11) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_11, g_11, c)
# cmd_all.extend(cmd)
if len(seq_10) > 0:
cmd = process_one_depth(gpcc_bin_path, ref_dir, cfg_dir, output_dir, seq_10, g_10, c)
# cmd_all.extend(cmd)
with open('run_gpcc_encode.sh', 'w') as f:
for item in cmd_all:
print(item)
f.write('%s & \n' % item)
| 33.735714
| 127
| 0.620792
|
f156086a4021096c72fa404f371f4156551b1ceb
| 23,371
|
py
|
Python
|
utils/imagetools.py
|
s0hv/Not-a-bo
|
6a6d5c4dfbfd7c0635e200d70fcaf7c12f8a009f
|
[
"MIT"
] | 6
|
2019-11-15T00:57:15.000Z
|
2022-01-08T08:11:08.000Z
|
utils/imagetools.py
|
s0hv/Not-a-bo
|
6a6d5c4dfbfd7c0635e200d70fcaf7c12f8a009f
|
[
"MIT"
] | null | null | null |
utils/imagetools.py
|
s0hv/Not-a-bo
|
6a6d5c4dfbfd7c0635e200d70fcaf7c12f8a009f
|
[
"MIT"
] | 3
|
2018-12-25T14:42:08.000Z
|
2021-07-22T15:56:43.000Z
|
"""
MIT License
Copyright (c) 2017 s0hvaperuna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import os
import subprocess
from io import BytesIO
from shlex import split
from sys import platform
from threading import Lock
import aiohttp
import geopatterns
import magic
import numpy as np
from PIL import Image, ImageChops, ImageDraw, ImageSequence
from colorthief import ColorThief as CF
from colour import Color
from geopatterns.utils import promap
from numpy import sqrt
from bot.exceptions import (ImageSizeException, ImageResizeException,
TooManyFrames, ImageDownloadError,
ImageProcessingError)
from bot.globals import IMAGES_PATH
# import cv2
cv2 = None  # cv2 is imported lazily (in remove_background) because importing it is slow
logger = logging.getLogger('terminal')
MAGICK = os.environ.get('MAGICK_PREFIX', '')
MAX_COLOR_DIFF = 2.82842712475 # Biggest value produced by color_distance
GLOW_LOCK = Lock()
TRIMMING_LOCK = Lock()
def make_shiftable(color):
# Color stays almost the same when it's too close to white or black
max_dist = MAX_COLOR_DIFF * 0.05
if color_distance(color, Color('white')) < max_dist:
color.set_hex('#EEEEEE')
elif color_distance(color, Color('black')) < max_dist:
color.set_hex('#333333')
return color
class ColorThief(CF):
def __init__(self, img): # skipcq: PYL-W0231
if isinstance(img, Image.Image):
self.image = img
else:
self.image = Image.open(img)
class GeoPattern(geopatterns.GeoPattern):
available_generators = [
'bricks',
'hexagons',
'overlapping_circles',
'overlapping_rings',
'plaid',
'plus_signs',
'rings',
'sinewaves',
'squares',
'triangles',
'xes'
]
def __init__(self, string, generator=None, color=None, scale=None,
opacity=1.0):
if isinstance(color, Color):
color = color.get_hex_l()
super().__init__(string, generator=generator, color=color, scale=scale,
opacity=opacity)
def generate_background(self, base_color, randomize_hue):
hue_offset = promap(int(self.hash[14:][:3], 16), 0, 4095, 0, 359)
sat_offset = int(self.hash[17:][:1], 16)
if randomize_hue:
base_color.hue = base_color.hue - hue_offset
if sat_offset % 2:
base_color.saturation = min(base_color.saturation + sat_offset / 100, 1.0)
else:
base_color.saturation = abs(base_color.saturation - sat_offset / 100)
rgb = base_color.rgb
r = int(round(rgb[0] * 255))
g = int(round(rgb[1] * 255))
b = int(round(rgb[2] * 255))
return self.svg.rect(0, 0, '100%', '100%', **{
'fill': 'rgba({}, {}, {}, {})'.format(r, g, b, self.opacity)
})
# https://stackoverflow.com/a/3753428/6046713
def replace_color(im, color1, color2):
"""
Args:
im: Image
color1: tuple of 3 integers. Color to be replaced
color2: tuple of 3 integers. Color that replaces the other color
Returns:
new image object
"""
im = im.convert('RGBA')
data = np.array(im) # "data" is a height x width x 4 numpy array
red, green, blue, _ = data.T # Temporarily unpack the bands for readability skipcq: PYL-E0633
r, g, b = color1
# Replace white with red... (leaves alpha values alone...)
white_areas = (red == r) & (blue == b) & (green == g)
data[..., :-1][white_areas.T] = color2 # Transpose back needed
im = Image.fromarray(data)
return im
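# Minimal usage sketch (illustrative, not from the original module): swap pure
# red for pure blue in a synthetic image. Any PIL image works the same way;
# note the result is RGBA because replace_color converts internally.
#
#     im = Image.new('RGB', (4, 4), (255, 0, 0))
#     im = replace_color(im, (255, 0, 0), (0, 0, 255))
#     assert im.getpixel((0, 0)) == (0, 0, 255, 255)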
def sepia(im, strength=0.75):
image = BytesIO()
im.save(image, 'PNG')
args = '{}convert - -sepia-tone {:.0%} -evaluate Uniform-noise 7 png:-'.format(MAGICK, strength)
    p = subprocess.Popen(args.split(' '), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    # communicate() feeds stdin and drains stdout in one step, avoiding pipe deadlocks
    out, err = p.communicate(image.getvalue())
buff = BytesIO(out)
del image
return Image.open(buff)
# http://effbot.org/zone/pil-sepia.htm
def sepia_filter(im):
def make_linear_ramp(white):
# putpalette expects [r,g,b,r,g,b,...]
ramp = []
r, g, b = white
for i in range(255):
if i == 0:
i = 100
elif i == 254:
i = 200
ramp.extend((int(r * i / 255), int(g * i / 255), int(b * i / 255)))
return ramp
# make sepia ramp (tweak color as necessary)
sepia = make_linear_ramp((250, 225, 175))
# convert to grayscale
if im.mode != "L":
im = im.convert("L")
# optional: apply contrast enhancement here, e.g.
#im = ImageOps.autocontrast(im)
# apply sepia palette
im.putpalette(sepia)
return im
def trim_image(im):
ulc = im.getpixel((0, 0))
if not (ulc == im.getpixel((0, im.height-1)) or ulc == im.getpixel((im.width-1, im.height-1))
or ulc == im.getpixel((im.width-1, 0))):
return im
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    return im  # nothing to trim
# http://stackoverflow.com/a/9085524/6046713
def color_distance(c1, c2):
rmean = (c1.red + c2.red) / 2
r = c1.red - c2.red
g = c1.green - c2.green
b = c1.blue - c2.blue
return sqrt((int((512+rmean)*r*r) >> 8) + 4*g*g + (int((767-rmean)*b*b) >> 8))
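# Worked check (illustrative): for black vs. white, rmean is 0.5 and every
# channel delta is 1, so the distance is sqrt(2 + 4 + 2) = sqrt(8) ~= 2.8284,
# which is exactly the MAX_COLOR_DIFF constant defined near the top of this
# module.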
# http://stackoverflow.com/a/38478744/6046713
def complementary_color(my_hex):
"""Returns complementary RGB color"""
if my_hex[0] == '#':
my_hex = my_hex[1:]
rgb = (my_hex[0:2], my_hex[2:4], my_hex[4:6])
comp = ['%02X' % (255 - int(a, 16)) for a in rgb]
return '#' + ''.join(comp)
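# Worked example (illustrative): every channel is inverted (255 - value), so
# pure red maps to cyan.
#
#     assert complementary_color('#FF0000') == '#00FFFF'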
# http://stackoverflow.com/a/24164270/6046713
def bg_from_texture(img, size, mode='RGB'):
# The width and height of the background tile
bg_w, bg_h = img.size
# Creates a new empty image, RGB mode, and size of size
new_im = Image.new(mode, size)
# The width and height of the new image
w, h = new_im.size
# Iterate through a grid, to place the background tile
for i in range(0, w, bg_w):
for j in range(0, h, bg_h):
# paste the image at location i, j:
new_im.paste(img, (i, j))
return new_im
def get_color(img, quality=5):
cf = ColorThief(img)
return cf.get_color(quality)
def get_palette(img, colors=6, quality=5):
cf = ColorThief(img)
return cf.get_palette(colors, quality=quality)
def create_geopattern_background(size, s, color=None, generator='overlapping_circles'):
pattern = GeoPattern(s, generator=generator, color=color)
args = '{}convert -size 100x100 svg:- png:-'.format(MAGICK)
p = subprocess.Popen(args.split(' '), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # communicate() feeds stdin and drains stdout in one step, avoiding pipe deadlocks
    out, err = p.communicate(pattern.svg_string.encode('utf-8'))
buff = BytesIO(out)
img = Image.open(buff)
img = bg_from_texture(img, size)
return img, pattern.base_color
# http://stackoverflow.com/a/29314286/6046713
# http://stackoverflow.com/a/41048793/6046713
def remove_background(image, blur=21, canny_thresh_1=10, canny_thresh_2=200,
mask_dilate_iter=10, mask_erode_iter=10):
global cv2 # skipcq: PYL-W0603
if cv2 is None:
try:
import cv2
except ImportError:
cv2 = None
if cv2 is None:
return image
# Parameters
BLUR = blur
CANNY_THRESH_1 = canny_thresh_1
CANNY_THRESH_2 = canny_thresh_2
MASK_DILATE_ITER = mask_dilate_iter
MASK_ERODE_ITER = mask_erode_iter
p = os.path.join(IMAGES_PATH, 'trimmed.png')
with TRIMMING_LOCK:
try:
image.save(p)
# Processing
# Read image
if platform == 'win32':
p = p.replace('\\', '/') # Windows paths don't work in cv2
img = cv2.imread(p)
except OSError:
return image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Edge detection
edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
edges = cv2.dilate(edges, None)
edges = cv2.erode(edges, None)
# Find contours in edges, sort by area
contour_info = []
contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for c in contours:
contour_info.append((
c,
cv2.isContourConvex(c),
cv2.contourArea(c),
))
contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
max_contour = contour_info[0]
# Create empty mask, draw filled polygon on it corresponding to largest contour
# Mask is black, polygon is white
mask = np.zeros(edges.shape)
cv2.fillConvexPoly(mask, max_contour[0], 255)
# Smooth mask, then blur it
mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
img = img.astype('float32') / 255.0 # for easy blending
# split image into channels
c_red, c_green, c_blue = cv2.split(img)
# merge with mask got on one of a previous steps
img_a = cv2.merge((c_red, c_green, c_blue, mask.astype('float32') / 255.0))
_, buf = cv2.imencode('.png', img_a * 255)
buffer = BytesIO(bytearray(buf))
return Image.open(buffer)
async def image_from_url(url, client):
return Image.open(await raw_image_from_url(url, client))
async def raw_image_from_url(url, client, get_mime=False):
if not url:
raise ImageDownloadError('No images found', '')
url = url.strip('\u200b \n')
data = None
mime_type = None
try:
async with client.get(url) as r:
logger.debug('Downloading image url {}'.format(url))
if not r.headers.get('Content-Type', '').startswith('image'):
raise ImageDownloadError("url isn't an image (Invalid header)", url)
max_size = 8000000
size = int(r.headers.get('Content-Length', 0))
if size > max_size:
raise ImageDownloadError('image too big', url)
data = BytesIO()
chunk = 4096
total = 0
async for d in r.content.iter_chunked(chunk):
if total == 0:
mime_type = magic.from_buffer(d, mime=True)
total += chunk
if not mime_type.startswith('image') and mime_type != 'application/octet-stream':
raise ImageDownloadError("url isn't an image", url)
total += chunk
if total > max_size:
raise ImageDownloadError('image is too big', url)
data.write(d)
data.seek(0)
except aiohttp.ClientError:
logger.exception(f'Could not download image {url}')
raise ImageDownloadError('unknown error', url)
if data is None:
raise ImageDownloadError('unknown error', url)
if get_mime:
return data, mime_type
return data
def shift_color(color, amount):
if amount == 0:
return color
def shift_value(val):
if val <= 0.5:
return val * 0.035 * (1 + (amount/20))
else:
return val * 0.035 * (1 - (amount/20))
color = make_shiftable(color)
sat = color.saturation
hue = color.hue
if round(hue, 3) == 0:
hue = 200
if round(sat, 3) == 0:
sat = 0.1
color.saturation = min(abs(sat * (1 + amount/20)), 1.0)
color.hue = shift_value(hue)
return color
def create_glow(img, amount):
image_path = os.path.join(IMAGES_PATH, 'text.png')
glow_path = os.path.join(IMAGES_PATH, 'glow.png')
with GLOW_LOCK:
try:
img.save(image_path, 'PNG')
args = '{}convert {} -blur 0x{} {}'.format(MAGICK, image_path, amount, glow_path)
subprocess.call(args.split(' '))
args = '{}composite -compose multiply {} {} png:-'.format(MAGICK, glow_path, image_path)
p = subprocess.Popen(args.split(' '), stdout=subprocess.PIPE)
out = p.stdout.read()
buff = BytesIO(out)
except Exception:
logger.exception('Could not create glow')
return img
return Image.open(buff)
def create_shadow(img, percent, opacity, x, y):
    args = '{}convert - ( +clone -background black -shadow {}x{}+{}+{} ) +swap ' \
           '-background transparent -layers merge +repage png:-'.format(MAGICK, percent, opacity, x, y)
    p = subprocess.Popen(split(args), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    image = BytesIO()
    img.save(image, format='PNG')
    # communicate() writes stdin and drains stdout in one step, avoiding pipe deadlocks
    out, err = p.communicate(image.getvalue())
    del image
buffer = BytesIO(out)
img = Image.open(buffer)
return img
def crop_to_square(img, crop_to_center=True):
if img.width == img.height:
return img
side = min(img.size)
size = (side, side)
if crop_to_center:
w, h = img.size
x = 0
y = 0
if side == w:
y = (h-side)//2
else:
x = (w-side)//2
img = img.crop((x, y, *size))
else:
img = img.crop((0, 0, *size))
return img
def resize_keep_aspect_ratio(img, new_size, crop_to_size=False, can_be_bigger=True,
center_cropped=False, background_color=None,
resample=Image.NEAREST, max_pixels=8294400):
"""
Args:
img: Image to be cropped
new_size: Size of the new image
crop_to_size: after resizing crop image so it's exactly the specified size
can_be_bigger:
Tells if the image can be bigger than the requested size
When true image will be as big or bigger in one dimension than requested size
center_cropped: Center the image. Used in combination with crop_to_size
since otherwise the added or removed space will only be in the
bottom right corner
background_color: Color of the background
resample: The type of resampling to use
max_pixels: Maximum amount of pixels the image can have
Returns:
Image.Image
"""
x, y = img.size
if 0 < max_pixels < x * y: # More pixels than in a 4k pic is a max by default
raise ImageSizeException(x * y, max_pixels)
new_x = new_size[0]
new_y = new_size[1]
if new_x is not None and new_y is not None:
x_m = x / new_x
y_m = y / new_y
check = y_m <= x_m if can_be_bigger else y_m >= x_m
elif new_x is None:
check = True
elif new_y is None:
check = False
if check:
m = new_size[1] / y
else:
m = new_size[0] / x
new_x, new_y = int(x * m), int(y * m)
if max_pixels > 0 and new_x * new_y > max_pixels//2:
raise ImageResizeException(new_x * new_y, max_pixels//2)
img = img.resize((new_x, new_y), resample=resample)
if crop_to_size:
if center_cropped:
w, h = img.size
x_ = 0
y_ = 0
if w != x:
x_ = -int((new_size[0] - w)/2)
if h != y:
y_ = -int((new_size[1] - h)/2)
img = img.crop((x_, y_, new_size[0] + x_, new_size[1] + y_))
else:
img = img.crop((0, 0, *new_size))
if background_color is not None:
im = Image.new(img.mode, img.size, background_color)
im.paste(img, mask=img)
img = im
return img
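# Minimal usage sketch (illustrative, not from the original module): fit a
# 1920x1080 image inside a 600x600 box without exceeding it on either side.
# The smaller multiplier (600/1920) wins, producing a 600x337 result.
#
#     thumb = resize_keep_aspect_ratio(img, (600, 600), can_be_bigger=False,
#                                      resample=Image.BILINEAR)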
def create_text(s, font, fill, canvas_size, point=(10, 10)):
text = Image.new('RGBA', canvas_size)
draw = ImageDraw.Draw(text)
draw.text(point, s, fill, font=font)
return text
def get_duration(frames):
if isinstance(frames[0].info.get('duration', None), list):
duration = frames[0].info['duration']
else:
duration = [frame.info.get('duration', 20) for frame in frames]
return duration
def fixed_gif_frames(img, func=None):
if func is None:
def func(im): # skipcq: PYL-E0102
return im.copy()
frames = []
while True:
try:
frames.append(func(img))
except ValueError as e:
e = str(e)
if e.startswith('tile cannot'):
raise ImageProcessingError(str(e))
else:
raise ImageProcessingError()
except: # skipcq: FLK-E722
raise ImageProcessingError()
try:
img.seek(img.tell() + 1)
except EOFError:
frames[-1] = func(img)
break
return frames
def get_frames(img):
return fixed_gif_frames(img)
def convert_frames(img, mode='RGBA'):
def func(img):
return img.convert(mode)
return fixed_gif_frames(img, func)
def func_to_gif(img, f, get_raw=True):
if max(img.size) > 600:
frames = [resize_keep_aspect_ratio(frame.convert('RGBA'), (600, 600), can_be_bigger=False, resample=Image.BILINEAR)
for frame in ImageSequence.Iterator(img)]
else:
frames = [frame.convert('RGBA') for frame in ImageSequence.Iterator(img)]
    if len(frames) > 150:
        raise TooManyFrames('Maximum amount of frames is 150')
images = []
for frame in frames:
images.append(f(frame))
data = BytesIO()
duration = get_duration(frames)
images[0].info['duration'] = duration
images[0].save(data, format='GIF', duration=duration, save_all=True, append_images=images[1:], loop=65535)
data.seek(0)
if get_raw:
data = optimize_gif(data.getvalue())
else:
data = Image.open(data)
return data
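# Minimal usage sketch (illustrative, not from the original module): apply a
# per-frame transform across an animated image, e.g. mirroring every frame.
# `gif_img` is assumed to be an already-opened animated PIL Image.
#
#     from PIL import ImageOps
#     mirrored = func_to_gif(gif_img, ImageOps.mirror, get_raw=False)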
def gradient_flash(im, get_raw=True, transparency=None):
"""
    When get_raw is True the gif is optimized with magick, fixing some problems
    that PIL creates. It is the suggested way of using this function.
"""
frames = []
if max(im.size) > 600:
def f(frame):
return resize_keep_aspect_ratio(frame.convert('RGBA'), (600, 600), can_be_bigger=False, resample=Image.BILINEAR)
else:
def f(frame):
return frame.convert('RGBA')
while True:
frames.append(f(im))
if len(frames) > 150:
            raise TooManyFrames('Maximum amount of frames is 150')
try:
im.seek(im.tell() + 1)
except EOFError:
frames[-1] = f(im)
break
    # Parenthesised so that an explicitly passed transparency value is respected
    if transparency is None and (im.mode == 'RGBA' or im.info.get('background', None) is not None
                                 or im.info.get('transparency', None) is not None):
        transparency = True
extended = 1
while len(frames) <= 20:
frames.extend([frame.copy() for frame in frames])
extended += 1
gradient = Color('red').range_to('#ff0004', len(frames))
frames_ = zip(frames, gradient)
images = []
try:
for frame in frames_:
frame, g = frame
img = Image.new('RGBA', im.size, tuple(map(lambda v: int(v*255), g.get_rgb())))
img = ImageChops.multiply(frame, img)
if transparency:
# Use a mask to map the transparent area in the gif frame
# optimize MUST be set to False when saving or transparency
# will most likely be broken
# source http://www.pythonclub.org/modules/pil/convert-png-gif
alpha = img.split()[3]
img = img.convert('P', palette=Image.ADAPTIVE, colors=255)
mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
img.paste(255, mask=mask)
img.info['transparency'] = 255
img.info['background'] = 255
images.append(img)
except Exception as e:
logger.exception('{} Failed to create gif'.format(e))
data = BytesIO()
if isinstance(frames[0].info.get('duration', None), list):
duration = frames[0].info['duration']
for _ in range(1, extended):
duration.extend(duration)
else:
duration = [frame.info.get('duration', 20) for frame in frames]
images[0].save(data, format='gif', duration=duration, save_all=True, append_images=images[1:], loop=65535, disposal=2, optimize=False)
data.seek(0)
if get_raw:
data = optimize_gif(data.getvalue())
else:
data = Image.open(data)
return data
def apply_transparency(frames):
im = frames[0]
transparency = False
if im.mode == 'RGBA' or im.info.get('background', None) is not None or im.info.get('transparency', None) is not None:
transparency = True
if not transparency:
return frames
images = []
for img in frames:
# Use a mask to map the transparent area in the gif frame
# optimize MUST be set to False when saving or transparency
# will most likely be broken
# source http://www.pythonclub.org/modules/pil/convert-png-gif
alpha = img.split()[3]
img = img.convert('P', palette=Image.ADAPTIVE, colors=255)
mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
img.paste(255, mask=mask)
img.info['transparency'] = 255
img.info['background'] = 255
images.append(img)
return images
def optimize_gif(gif_bytes):
cmd = '{}convert - -dither none -layers optimize -dispose background -matte -depth 8 gif:-'.format(MAGICK)
p = subprocess.Popen(split(cmd), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # communicate() feeds stdin and drains stdout in one step, avoiding pipe deadlocks
    out, _ = p.communicate(gif_bytes)
buff = BytesIO(out)
return buff
def concatenate_images(images, width=50):
max_width = width*len(images)
height = max(map(lambda i: i.height, images))
empty = Image.new('RGBA', (max_width, height), (0,0,0,0))
offset = 0
for im in images:
empty.paste(im, (offset, 0))
offset += width
return empty
def stack_images(images, height=50, max_width: int=None):
max_height = height*len(images)
if not max_width:
max_width = max(map(lambda i: i.width, images))
empty = Image.new('RGBA', (max_width, max_height), (0,0,0,0))
offset = 0
for im in images:
empty.paste(im, (0, offset))
offset += height
return empty
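# Minimal usage sketch (illustrative, not from the original module): tile three
# 50x50 swatches into a 150x50 strip, then stack two strips into 150x100.
#
#     swatches = [Image.new('RGBA', (50, 50), c)
#                 for c in ((255, 0, 0, 255), (0, 255, 0, 255), (0, 0, 255, 255))]
#     strip = concatenate_images(swatches)        # 150x50
#     column = stack_images([strip, strip])       # 150x100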
| 30.791831
| 146
| 0.607933
|
e3e215d1365d480faf7f2377496146b135e01e07
| 5,921
|
py
|
Python
|
CIM100/IEC61968/AssetModels/ConductorInfo.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM100/IEC61968/AssetModels/ConductorInfo.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM100/IEC61968/AssetModels/ConductorInfo.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM100.IEC61968.Assets.AssetInfo import AssetInfo
class ConductorInfo(AssetInfo):
"""Conductor data.Conductor data.
"""
def __init__(self, insulated=False, usage="secondary", insulationMaterial="treeRetardantCrosslinkedPolyethylene", phaseCount=0, insulationThickness=0.0, WireArrangements=None, LineSegments=None, *args, **kw_args):
"""Initialises a new 'ConductorInfo' instance.
@param insulated: True if conductor is insulated.
@param usage: Usage of this conductor. Values are: "secondary", "other", "distribution", "transmission"
@param insulationMaterial: (if insulated conductor) Material used for insulation. Values are: "treeRetardantCrosslinkedPolyethylene", "butyl", "highPressureFluidFilled", "other", "varnishedCambricCloth", "siliconRubber", "beltedPilc", "crosslinkedPolyethylene", "oilPaper", "lowCapacitanceRubber", "asbestosAndVarnishedCambric", "treeResistantHighMolecularWeightPolyethylene", "unbeltedPilc", "ozoneResistantRubber", "ethylenePropyleneRubber", "highMolecularWeightPolyethylene", "varnishedDacronGlass", "rubber"
@param phaseCount: Number of phases (including neutral) to be retained. Any wires beyond this number should be reduced into the earth return.
@param insulationThickness: (if insulated conductor) Thickness of the insulation.
@param WireArrangements: All wire arrangements (single wires) that make this conductor.
@param LineSegments: All line segments described by this conductor data.
"""
#: True if conductor is insulated.
self.insulated = insulated
#: Usage of this conductor. Values are: "secondary", "other", "distribution", "transmission"
self.usage = usage
#: (if insulated conductor) Material used for insulation. Values are: "treeRetardantCrosslinkedPolyethylene", "butyl", "highPressureFluidFilled", "other", "varnishedCambricCloth", "siliconRubber", "beltedPilc", "crosslinkedPolyethylene", "oilPaper", "lowCapacitanceRubber", "asbestosAndVarnishedCambric", "treeResistantHighMolecularWeightPolyethylene", "unbeltedPilc", "ozoneResistantRubber", "ethylenePropyleneRubber", "highMolecularWeightPolyethylene", "varnishedDacronGlass", "rubber"
self.insulationMaterial = insulationMaterial
#: Number of phases (including neutral) to be retained. Any wires beyond this number should be reduced into the earth return.
self.phaseCount = phaseCount
#: (if insulated conductor) Thickness of the insulation.
self.insulationThickness = insulationThickness
self._WireArrangements = []
self.WireArrangements = [] if WireArrangements is None else WireArrangements
self._LineSegments = []
self.LineSegments = [] if LineSegments is None else LineSegments
super(ConductorInfo, self).__init__(*args, **kw_args)
_attrs = ["insulated", "usage", "insulationMaterial", "phaseCount", "insulationThickness"]
_attr_types = {"insulated": bool, "usage": str, "insulationMaterial": str, "phaseCount": int, "insulationThickness": float}
_defaults = {"insulated": False, "usage": "secondary", "insulationMaterial": "treeRetardantCrosslinkedPolyethylene", "phaseCount": 0, "insulationThickness": 0.0}
_enums = {"usage": "ConductorUsageKind", "insulationMaterial": "ConductorInsulationKind"}
_refs = ["WireArrangements", "LineSegments"]
_many_refs = ["WireArrangements", "LineSegments"]
def getWireArrangements(self):
"""All wire arrangements (single wires) that make this conductor.
"""
return self._WireArrangements
def setWireArrangements(self, value):
for x in self._WireArrangements:
x.ConductorInfo = None
for y in value:
y._ConductorInfo = self
self._WireArrangements = value
WireArrangements = property(getWireArrangements, setWireArrangements)
def addWireArrangements(self, *WireArrangements):
for obj in WireArrangements:
obj.ConductorInfo = self
def removeWireArrangements(self, *WireArrangements):
for obj in WireArrangements:
obj.ConductorInfo = None
def getLineSegments(self):
"""All line segments described by this conductor data.
"""
return self._LineSegments
def setLineSegments(self, value):
for x in self._LineSegments:
x.ConductorInfo = None
for y in value:
y._ConductorInfo = self
self._LineSegments = value
LineSegments = property(getLineSegments, setLineSegments)
def addLineSegments(self, *LineSegments):
for obj in LineSegments:
obj.ConductorInfo = self
def removeLineSegments(self, *LineSegments):
for obj in LineSegments:
obj.ConductorInfo = None
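# Minimal usage sketch (illustrative, not from the original module; assumes
# AssetInfo needs no positional arguments, as elsewhere in PyCIM):
#
#     info = ConductorInfo(insulated=True, usage="distribution", phaseCount=3)
#     assert info.insulated and info.phaseCount == 3
#     assert info.LineSegments == [] and info.WireArrangements == []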
| 52.866071
| 519
| 0.726735
|
9f8ce0fb9ba3ee0c3287dc7b188f1c855fee24bf
| 2,276
|
py
|
Python
|
aiogram/types/update.py
|
victorusachev/aiogram
|
9571669ca4b06165031d8f9830130f3c638b60d8
|
[
"MIT"
] | 1
|
2020-02-27T02:46:51.000Z
|
2020-02-27T02:46:51.000Z
|
aiogram/types/update.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | 1
|
2019-10-18T19:33:20.000Z
|
2019-10-18T19:33:20.000Z
|
aiogram/types/update.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from . import base
from . import fields
from .callback_query import CallbackQuery
from .chosen_inline_result import ChosenInlineResult
from .inline_query import InlineQuery
from .message import Message
from .poll import Poll
from .pre_checkout_query import PreCheckoutQuery
from .shipping_query import ShippingQuery
from ..utils import helper
class Update(base.TelegramObject):
"""
This object represents an incoming update.
At most one of the optional parameters can be present in any given update.
https://core.telegram.org/bots/api#update
"""
update_id: base.Integer = fields.Field()
message: Message = fields.Field(base=Message)
edited_message: Message = fields.Field(base=Message)
channel_post: Message = fields.Field(base=Message)
edited_channel_post: Message = fields.Field(base=Message)
inline_query: InlineQuery = fields.Field(base=InlineQuery)
chosen_inline_result: ChosenInlineResult = fields.Field(base=ChosenInlineResult)
callback_query: CallbackQuery = fields.Field(base=CallbackQuery)
shipping_query: ShippingQuery = fields.Field(base=ShippingQuery)
pre_checkout_query: PreCheckoutQuery = fields.Field(base=PreCheckoutQuery)
poll: Poll = fields.Field(base=Poll)
def __hash__(self):
return self.update_id
def __int__(self):
return self.update_id
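    # Illustrative note (not from the original source): since __int__ returns
    # update_id, the next long-polling offset can be derived directly from an
    # update object:
    #
    #     offset = int(update) + 1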
class AllowedUpdates(helper.Helper):
"""
Helper for allowed_updates parameter in getUpdates and setWebhook methods.
You can use &, + or | operators for make combination of allowed updates.
Example:
>>> bot.get_updates(allowed_updates=AllowedUpdates.MESSAGE + AllowedUpdates.EDITED_MESSAGE)
"""
mode = helper.HelperMode.snake_case
MESSAGE = helper.ListItem() # message
EDITED_MESSAGE = helper.ListItem() # edited_message
CHANNEL_POST = helper.ListItem() # channel_post
EDITED_CHANNEL_POST = helper.ListItem() # edited_channel_post
INLINE_QUERY = helper.ListItem() # inline_query
CHOSEN_INLINE_QUERY = helper.ListItem() # chosen_inline_result
CALLBACK_QUERY = helper.ListItem() # callback_query
SHIPPING_QUERY = helper.ListItem() # shipping_query
PRE_CHECKOUT_QUERY = helper.ListItem() # pre_checkout_query
| 37.311475
| 99
| 0.755272
|
38b791252d9aa0f629c27d53c3efb223a50279ef
| 13,560
|
py
|
Python
|
conformity/fields/logging.py
|
arareko/conformity
|
a24d6027e7195d20c43065a06f610182a16fe9cd
|
[
"Apache-2.0"
] | 26
|
2017-07-03T04:52:35.000Z
|
2021-08-15T00:19:00.000Z
|
conformity/fields/logging.py
|
arareko/conformity
|
a24d6027e7195d20c43065a06f610182a16fe9cd
|
[
"Apache-2.0"
] | 16
|
2017-05-01T19:31:56.000Z
|
2019-10-10T14:22:01.000Z
|
conformity/fields/logging.py
|
arareko/conformity
|
a24d6027e7195d20c43065a06f610182a16fe9cd
|
[
"Apache-2.0"
] | 8
|
2019-05-13T17:48:24.000Z
|
2021-09-08T11:27:39.000Z
|
from __future__ import (
absolute_import,
unicode_literals,
)
import collections
import logging
from typing import (
Any as AnyType,
Hashable as HashableType,
List as ListType,
Mapping,
Optional,
Tuple as TupleType,
)
import six
from conformity import fields
from conformity.constants import ERROR_CODE_UNKNOWN
from conformity.types import Error
__all__ = (
'PythonLogLevel',
'PYTHON_LOGGER_SCHEMA',
'PYTHON_LOGGING_CONFIG_SCHEMA',
'PYTHON_ROOT_LOGGER_SCHEMA',
)
class PythonLogLevel(fields.Constant):
"""
A pre-defined `Constant` field with all the possible Python log levels populated. All you need is a description for
documentation.
"""
def __init__(self, description=None): # type: (Optional[six.text_type]) -> None
"""
Constructs a `PythonLogLevel` field.
:param description: The description for documentation
"""
super(PythonLogLevel, self).__init__(
logging.getLevelName(logging.DEBUG),
logging.getLevelName(logging.INFO),
logging.getLevelName(logging.WARNING),
logging.getLevelName(logging.ERROR),
logging.getLevelName(logging.CRITICAL),
description=description,
)
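# Minimal usage sketch (illustrative, not from the original module; assumes the
# standard conformity `errors()` accessor, which returns an empty list for
# valid values):
#
#     field = PythonLogLevel(description='Verbosity for the app logger')
#     assert not field.errors('INFO')    # one of the populated level names
#     assert field.errors('VERBOSE')     # not a Python log level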
class _LoggingValidator(fields.AdditionalCollectionValidator[Mapping[HashableType, AnyType]]):
@staticmethod
def _ensure_configured(
source, # type: Mapping[str, AnyType]
name, # type: str
errors, # type: ListType[Error]
referencer_noun, # type: str
referencer, # type: str
referenced_noun, # type: str
pointer, # type: str
pointer_args, # type: TupleType[AnyType, ...]
):
if name not in source:
errors.append(Error(
code=ERROR_CODE_UNKNOWN,
message=(
'{referencer_noun} "{referencer}" references {referenced_noun} "{name}", which is not configured.'
).format(
referencer_noun=referencer_noun,
referencer=referencer,
referenced_noun=referenced_noun,
name=name,
),
pointer=pointer.format(*pointer_args),
))
def errors(self, value): # type: (Mapping[HashableType, AnyType]) -> ListType[Error]
errors = [] # type: ListType[Error]
formatters = value.get('formatters', {}) # type: Mapping[str, Mapping[str, str]]
filters = value.get('filters', {}) # type: Mapping[str, Mapping[str, AnyType]]
handlers = value.get('handlers', {}) # type: Mapping[str, Mapping[str, AnyType]]
loggers = value.get('loggers', {}) # type: Mapping[str, Mapping[str, AnyType]]
root = value.get('root', {}) # type: Mapping[str, AnyType]
if filters:
for filter_name, filter_config in filters.items():
standard_keys = 0
if '()' in filter_config:
standard_keys = 1
is_standard = filter_config['()'] == 'logging.Filter'
else:
is_standard = True
if 'name' in filter_config:
standard_keys += 1
if is_standard and len(filter_config) > standard_keys:
errors.append(Error(
code=ERROR_CODE_UNKNOWN,
message='Not all keys supported for filter named "{}"'.format(filter_name),
pointer='filters.{}'.format(filter_name),
))
if value.get('incremental', False) is not True:
if handlers:
for handler_name, handler_config in handlers.items():
if 'formatter' in handler_config:
self._ensure_configured(
formatters, handler_config['formatter'], errors,
'Handler', handler_name, 'formatter', 'handlers.{}.formatter', (handler_name, ),
)
handler_filters = handler_config.get('filters', []) # type: ListType[str]
for i, filter in enumerate(handler_filters):
self._ensure_configured(
filters, filter, errors,
'Handler', handler_name, 'filter', 'handlers.{}.filters.{}', (handler_name, i),
)
if loggers:
for logger_name, logger_config in loggers.items():
logger_filters = logger_config.get('filters', []) # type: ListType[str]
for i, filter in enumerate(logger_filters):
self._ensure_configured(
filters, filter, errors,
'Logger', logger_name, 'filter', 'loggers.{}.filters.{}', (logger_name, i),
)
logger_handlers = logger_config.get('handlers', []) # type: ListType[str]
for i, handler in enumerate(logger_handlers):
self._ensure_configured(
handlers, handler, errors,
'Logger', logger_name, 'handler', 'loggers.{}.handlers.{}', (logger_name, i),
)
if root:
root_filters = root.get('filters', []) # type: ListType[str]
for i, filter in enumerate(root_filters):
self._ensure_configured(
filters, filter, errors,
'Logger', 'root', 'filter', 'root.filters.{}', (i, ),
)
root_handlers = root.get('handlers', []) # type: ListType[str]
for i, handler in enumerate(root_handlers):
self._ensure_configured(
handlers, handler, errors,
'Logger', 'root', 'handler', 'root.handlers.{}', (i, ),
)
return errors
PYTHON_ROOT_LOGGER_SCHEMA = fields.Dictionary(
{
'level': PythonLogLevel(
description='The logging level at or above which this logger will handle logging events and send them to '
'its configured handlers.',
),
'filters': fields.List(
fields.UnicodeString(),
description='A list of references to keys from `filters` for assigning those filters to this logger.',
),
'handlers': fields.List(
fields.UnicodeString(),
description='A list of references to keys from `handlers` for assigning those handlers to this logger.',
),
},
optional_keys=('level', 'filters', 'handlers'),
)
PYTHON_LOGGER_SCHEMA = PYTHON_ROOT_LOGGER_SCHEMA.extend(
contents={
'propagate': fields.Boolean(
description='Whether logging events handled by this logger should propagate to other loggers and/or the '
'root logger. Defaults to `True`.'
),
},
optional_keys=('propagate', ),
)
PYTHON_LOGGING_CONFIG_SCHEMA = fields.Dictionary(
collections.OrderedDict((
('version', fields.Integer(gte=1, lte=1)),
('formatters', fields.SchemalessDictionary(
key_type=fields.UnicodeString(),
value_type=fields.Dictionary(
{
'format': fields.UnicodeString(
description='The format string for this formatter (see '
'https://docs.python.org/3/library/logging.html#logrecord-attributes).',
),
'datefmt': fields.UnicodeString(
description='The optional date format used when formatting dates in the log output (see '
'https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior).',
),
},
optional_keys=('datefmt', ),
),
description='This defines a mapping of logging formatter names to formatter configurations. The `format` '
'key specifies the log format and the `datefmt` key specifies the date format.',
)),
('filters', fields.SchemalessDictionary(
key_type=fields.UnicodeString(),
value_type=fields.Dictionary(
{
'()': fields.TypePath(
base_classes=logging.Filter,
description='The optional, fully-qualified name of the class extending `logging.Filter`, used '
'to override the default class `logging.Filter`.',
),
'name': fields.UnicodeString(
description='The optional filter name which will be passed to the `name` argument of the '
'`logging.Filter` class.',
),
},
optional_keys=('()', 'name'),
allow_extra_keys=True,
),
description='This defines a mapping of logging filter names to filter configurations. If a config has '
'only the `name` key, then `logging.Filter` will be instantiated with that argument. You can '
'specify a `()` key (yes, really) to override the default `logging.Filter` class with a '
'custom filter implementation (which should extend `logging.Filter`). Extra keys are allowed '
'only for custom implementations having extra constructor arguments matching those key names.',
)),
('handlers', fields.SchemalessDictionary(
key_type=fields.UnicodeString(),
value_type=fields.Dictionary(
{
'class': fields.TypePath(
base_classes=logging.Handler,
description='The fully-qualified name of the class extending `logging.Handler`.',
),
'level': PythonLogLevel(
description='The logging level at or above which this handler will emit logging events.',
),
'formatter': fields.UnicodeString(
description='A reference to a key from `formatters` for assigning that formatter to this '
'handler.',
),
'filters': fields.List(
fields.UnicodeString(),
description='A list of references to keys from `filters` for assigning those filters to this '
'handler.',
),
},
optional_keys=('level', 'formatter', 'filters'),
allow_extra_keys=True,
),
description='This defines a mapping of logging handler names to handler configurations. The `class` key '
'is the importable Python path to the class extending `logging.Handler`. The `level` and '
'`filters` keys apply to all handlers. The `formatter` key is valid for all handlers, but not '
'all handlers will use it. Extra keys are allowed only for handlers having extra constructor '
'arguments matching those key names.',
)),
('loggers', fields.SchemalessDictionary(
key_type=fields.UnicodeString(),
value_type=PYTHON_LOGGER_SCHEMA,
description='This defines a mapping of logger names to logger configurations. A log event not handled by '
'one of these configured loggers (if any) will instead be handled by the root logger. A log '
'event handled by one of these configured loggers may still be handled by another logger or '
'the root logger unless its `propagate` key is set to `False`.',
)),
('root', PYTHON_ROOT_LOGGER_SCHEMA),
('incremental', fields.Boolean(
description='Whether this configuration should be considered incremental to any existing configuration. '
'It defaults to `False` and it is rare that you should ever need to change that.',
)),
('disable_existing_loggers', fields.Boolean(
description='Whether all existing loggers (objects obtained from `logging.getLogger()`) should be '
'disabled when this logging config is loaded. Take our advice and *always* set this to '
'`False`. It defaults to `True` and you almost never want that, because loggers in '
'already-loaded modules will stop working.',
)),
)),
optional_keys=(
'version',
'formatters',
'filters',
'handlers',
'root',
'loggers',
'incremental',
'disable_existing_loggers',
),
description='Settings to enforce the standard Python logging dictionary-based configuration, as you would load '
'with `logging.config.dictConfig()`. For more information than the documentation here, see '
'https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema.',
additional_validator=_LoggingValidator(),
)
"""""" # Empty docstring to make autodoc document this data
| 45.810811
| 119
| 0.550221
|
4d512c1f62caf61fccd71f4679a076e73a52550a
| 711
|
py
|
Python
|
setup_app/static.py
|
duttarnab/community-edition-setup
|
cc87861440397648216af899f87d6868a8064190
|
[
"MIT"
] | 178
|
2015-02-18T19:36:13.000Z
|
2022-03-27T19:38:58.000Z
|
setup_app/static.py
|
duttarnab/community-edition-setup
|
cc87861440397648216af899f87d6868a8064190
|
[
"MIT"
] | 640
|
2015-01-02T16:54:56.000Z
|
2022-03-30T18:51:08.000Z
|
setup_app/static.py
|
duttarnab/community-edition-setup
|
cc87861440397648216af899f87d6868a8064190
|
[
"MIT"
] | 76
|
2015-02-04T21:55:09.000Z
|
2022-03-11T19:24:47.000Z
|
class InstallTypes:
NONE = 0
LOCAL = '1'
REMOTE = '2'
class colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
DANGER = '\033[31m'
class BackendTypes:
LDAP = 1
COUCHBASE = 2
MYSQL = 3
PGSQL = 4
SPANNER = 5
class AppType:
APPLICATION = 1
SERVICE = 2
class InstallOption:
    MANDATORY = 1
    OPTIONAL = 2
COMPLETED = -99
ERROR = -101
suggested_mem_size = 3.7 # in GB
suggested_number_of_cpu = 2
suggested_free_disk_space = 40  # in GB
file_max = 64000
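if __name__ == '__main__':
    # Hedged demo of the colors helper (not part of the setup flow itself):
    print(colors.WARNING + 'suggested free disk space: {} GB'.format(suggested_free_disk_space) + colors.ENDC)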
| 18.230769
| 37
| 0.535865
|
12fc1aa6ef1099f65687f8af206073203ae4c96c
| 14,824
|
py
|
Python
|
lib/datasets/pascal_voc.py
|
jinyu121/CIOD
|
37ab2ce14635c4b5cef2ea43b8439c5cd0e0f662
|
[
"MIT"
] | 33
|
2019-07-09T07:14:40.000Z
|
2022-02-17T03:00:36.000Z
|
lib/datasets/pascal_voc.py
|
jinyu121/CIOD
|
37ab2ce14635c4b5cef2ea43b8439c5cd0e0f662
|
[
"MIT"
] | 7
|
2020-01-10T16:37:12.000Z
|
2021-11-26T02:02:13.000Z
|
lib/datasets/pascal_voc.py
|
jinyu121/CIOD
|
37ab2ce14635c4b5cef2ea43b8439c5cd0e0f662
|
[
"MIT"
] | 7
|
2019-07-18T02:27:44.000Z
|
2020-04-28T09:41:27.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import pickle
import subprocess
import uuid
import xml.etree.ElementTree as ET
# import PIL
import numpy as np
import scipy.io as sio
import scipy.sparse
from tqdm import tqdm
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from model.utils.config import cfg
from . import ds_utils
from .imdb import imdb
from .voc_eval import voc_eval
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
# <<<< obsolete
class pascal_voc(imdb):
def __init__(self, image_set, year, classes=None, ext=None, devkit_path=None, data_extra=None):
imdb.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = devkit_path or self._get_default_path()
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = classes or ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = ext or '.jpg'
self._image_index = self._load_image_set_index()
if data_extra:
self._image_index.extend(data_extra)
# Default to roidb handler
# self._roidb_handler = self.selective_search_roidb
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None,
'min_size': 2}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
        This function regenerates the annotations and rewrites the cache file (any existing cache is removed first).
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
os.remove(cache_file)
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
tqdm.write('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
tqdm.write('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
tqdm.write('wrote ss roidb to {}'.format(cache_file))
return roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
tqdm.write('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
# if not self.config['use_diff']:
# # Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# # if len(non_diff_objs) != len(objs):
# # print 'Removed {} difficult objects'.format(
# # len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
ishards = np.zeros((num_objs), dtype=np.int32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
            difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_ishard': ishards,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
# tqdm.write('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
        use_07_metric = int(self._year) < 2010
tqdm.write('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
tqdm.write(" | ".join(["cls", "ap", "gt", "tot", "correct", "wrong", "w_iou", "w_cls", "w_oth"]))
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap, t_ngt, t_tot, t_cor, t_wro, t_iou, t_cls, t_oth = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
# tqdm.write('AP for {} = {:.4f}'.format(cls, ap))
tqdm.write(
' | '.join("{:4}".format(x) for x in [cls, ap * 100, t_ngt, t_tot, t_cor, t_wro, t_iou, t_cls, t_oth]))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
tqdm.write('Mean AP = {:.4f}'.format(np.mean(aps)))
tqdm.write('~~~~~~~~')
tqdm.write('Results:')
for ap in aps:
tqdm.write('{:.3f}'.format(ap))
tqdm.write('{:.3f}'.format(np.mean(aps)))
tqdm.write('~~~~~~~~')
tqdm.write('')
return aps
def _do_matlab_eval(self, output_dir='output'):
tqdm.write('-----------------------------------------------------')
tqdm.write('Computing results with the official MATLAB eval code.')
tqdm.write('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
tqdm.write('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
aps = self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
return aps
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
d = pascal_voc('trainval', '2007')
res = d.roidb
    from IPython import embed
    embed()
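# Hedged usage sketch (requires cfg.DATA_DIR/VOCdevkit2007 on disk):
#   d = pascal_voc('trainval', '2007')
#   roidb = d.gt_roidb()
#   print(len(roidb), 'annotated images')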
| 39.849462
| 119
| 0.552617
|
2f0607189c7e893f7d80d21f224f62db7a96b4d2
| 3,443
|
py
|
Python
|
src/train.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
src/train.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
src/train.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import torch
import torch.utils.data
from torchvision.transforms import transforms as T
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
print('Setting up data...')
Dataset = get_dataset(opt.dataset, opt.task)
    with open(opt.data_cfg) as f:
        data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
transforms = T.Compose([T.ToTensor()])
dataset = Dataset(opt, dataset_root, trainset_paths,
(1088, 608), augment=True, transforms=transforms)
opt = opts().update_dataset_info_and_set_heads(opt, dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv)
print(model)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
# Get dataloader
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, trainer.optimizer, opt.resume, opt.lr, opt.lr_step)
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if epoch % 5 == 0 or epoch >= 25:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
logger.close()
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
opt = opts().parse()
main(opt)
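# Hedged invocation sketch (the flag names mirror the opt.* attributes used
# above; the task name and config path are illustrative assumptions):
#   python src/train.py mot --batch_size 8 --lr 1e-4 --data_cfg lib/cfg/data.json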
| 34.089109
| 86
| 0.643334
|
dec9fe68d24def2c564d51bafb225b4e06efdb37
| 3,089
|
py
|
Python
|
tests/core/test_setget.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_setget.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_setget.py
|
psumesh/siliconcompiler
|
14663c1d0d6c46994bc9bb24595db7e4ac4e1600
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import siliconcompiler
import re
def test_setget():
'''API test for set/get methods
    Performs a set or add based on the API example for each entry in the schema
    and ensures that we can recover the same value. Also tests that keypaths in
    schema examples are valid.
'''
DEBUG = False
chip = siliconcompiler.Chip()
error = 0
allkeys = chip.getkeys()
for key in allkeys:
sctype = chip.get(*key, field='type')
examples = chip.get(*key, field='example')
if DEBUG:
print(key, sctype, examples)
for example in examples:
match = re.match(r'api\:\s+chip.(set|add|get)\((.*)\)', example)
if match is not None:
break
assert match is not None, f'Illegal example for keypath {key}'
if match.group(1) == 'get':
continue
argstring = re.sub(r'[\'\s]', '', match.group(2))
tuplematch = re.match(r'(.*?),\((.*,.*)\)', argstring)
if tuplematch:
keypath = tuplematch.group(1).split(',')
tuplestr = tuplematch.group(2)
if sctype.strip('[]').startswith('(str,'):
tuplestr = re.sub(r'[\(\)\'\s]','',tuplestr)
value = tuple(tuplestr.split(','))
else:
value = tuple(map(float, tuplestr.split(',')))
if re.match(r'\[',sctype):
value = [value]
args = keypath + [value]
else:
keypath = argstring.split(',')[:-1]
value = argstring.split(',')[-1]
if sctype == "float":
value = float(value)
elif sctype == "bool":
                value = bool(value == 'true')
elif sctype == "int":
value = int(value)
if re.match(r'\[',sctype):
value = [value]
args = keypath + [value]
if match.group(1) == 'set':
if DEBUG:
print(args)
chip.set(*args, clobber=True)
elif match.group(1) == 'add':
chip.add(*args)
result = chip.get(*keypath)
assert result == value, f'Expected value {value} from keypath {keypath}. Got {result}.'
chip.write_manifest('allvals.json')
    assert error == 0
def test_set_field_bool():
chip = siliconcompiler.Chip()
chip.set('source', False, field='copy')
assert chip.get('source', field='copy') is False
def test_set_field_error():
chip = siliconcompiler.Chip()
chip.set('source', 'asdf', field='copy')
# expect copy flag unchanged and error triggered
assert chip.get('source', field='copy') is True
assert chip.error == 1
def test_set_add_field_list():
chip = siliconcompiler.Chip()
chip.set('source', 'Alyssa P. Hacker', field='author')
chip.add('source', 'Ben Bitdiddle', field='author')
assert chip.get('source', field='author') == ['Alyssa P. Hacker', 'Ben Bitdiddle']
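def test_set_get_roundtrip():
    # Hedged sketch: a direct set/get roundtrip on a scalar keypath
    # ('design' is assumed to be a writable string entry in the schema).
    chip = siliconcompiler.Chip()
    chip.set('design', 'heartbeat')
    assert chip.get('design') == 'heartbeat'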
#########################
if __name__ == "__main__":
test_setget()
| 32.861702
| 95
| 0.546455
|
bed0baa456cf504a60e9aeb3c3041b154ec6df7a
| 19,094
|
py
|
Python
|
rmgpy/data/kinetics/common.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
rmgpy/data/kinetics/common.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
rmgpy/data/kinetics/common.py
|
mbprend/RMG-Py
|
29e111d683f2daa0b376417be60e76b32ce8a993
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains classes and functions that are used by multiple modules
in this subpackage.
"""
import itertools
import logging
from rmgpy.data.base import LogicNode
from rmgpy.exceptions import DatabaseError
from rmgpy.molecule import Group, Molecule
from rmgpy.reaction import Reaction
from rmgpy.species import Species
################################################################################
def save_entry(f, entry):
"""
Save an `entry` in the kinetics database by writing a string to
the given file object `f`.
"""
def sort_efficiencies(efficiencies0):
efficiencies = {}
for mol, eff in efficiencies0.items():
if isinstance(mol, str):
# already in SMILES string format
smiles = mol
else:
smiles = mol.to_smiles()
efficiencies[smiles] = eff
keys = list(efficiencies.keys())
keys.sort()
return [(key, efficiencies[key]) for key in keys]
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
if entry.label != '':
f.write(' label = "{0}",\n'.format(entry.label))
# Entries for kinetic rules, libraries, training reactions
# and depositories will have a Reaction object for its item
if isinstance(entry.item, Reaction):
# Write out additional data if depository or library
# kinetic rules would have a Group object for its reactants instead of Species
if isinstance(entry.item.reactants[0], Species):
# Add degeneracy if the reaction is coming from a depository or kinetics library
f.write(' degeneracy = {0:.1f},\n'.format(entry.item.degeneracy))
if entry.item.duplicate:
f.write(' duplicate = {0!r},\n'.format(entry.item.duplicate))
if not entry.item.reversible:
f.write(' reversible = {0!r},\n'.format(entry.item.reversible))
if entry.item.allow_pdep_route:
f.write(' allow_pdep_route = {0!r},\n'.format(entry.item.allow_pdep_route))
if entry.item.elementary_high_p:
f.write(' elementary_high_p = {0!r},\n'.format(entry.item.elementary_high_p))
if entry.item.allow_max_rate_violation:
f.write(' allow_max_rate_violation = {0!r},\n'.format(entry.item.allow_max_rate_violation))
# Entries for groups with have a group or logicNode for its item
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.to_adjacency_list())
f.write('""",\n')
elif isinstance(entry.item, LogicNode):
f.write(' group = "{0}",\n'.format(entry.item))
else:
raise DatabaseError("Encountered unexpected item of type {0} while "
"saving database.".format(entry.item.__class__))
# Write kinetics
if isinstance(entry.data, str):
f.write(' kinetics = "{0}",\n'.format(entry.data))
elif entry.data is not None:
efficiencies = None
if hasattr(entry.data, 'efficiencies'):
efficiencies = entry.data.efficiencies
entry.data.efficiencies = dict(sort_efficiencies(entry.data.efficiencies))
kinetics = repr(entry.data) # todo prettify currently does not support uncertainty attribute
kinetics = ' kinetics = {0},\n'.format(kinetics.replace('\n', '\n '))
f.write(kinetics)
if hasattr(entry.data, 'efficiencies'):
entry.data.efficiencies = efficiencies
else:
f.write(' kinetics = None,\n')
# Write reference
if entry.reference is not None:
reference = entry.reference.to_pretty_repr()
lines = reference.splitlines()
f.write(' reference = {0}\n'.format(lines[0]))
for line in lines[1:-1]:
f.write(' {0}\n'.format(line))
        f.write(' ),\n')
if entry.reference_type != "":
f.write(' referenceType = "{0}",\n'.format(entry.reference_type))
if entry.rank is not None:
f.write(' rank = {0},\n'.format(entry.rank))
if entry.short_desc.strip() != '':
f.write(f' shortDesc = """{entry.short_desc.strip()}""",\n')
if entry.long_desc.strip() != '':
f.write(f' longDesc = \n"""\n{entry.long_desc.strip()}\n""",\n')
f.write(')\n\n')
def ensure_species(input_list, resonance=False, keep_isomorphic=False):
"""
The input list of :class:`Species` or :class:`Molecule` objects is modified
in place to only have :class:`Species` objects. Returns None.
"""
for index, item in enumerate(input_list):
if isinstance(item, Molecule):
new_item = Species(molecule=[item])
elif isinstance(item, Species):
new_item = item
else:
raise TypeError('Only Molecule or Species objects can be handled.')
if resonance:
if not any([mol.reactive for mol in new_item.molecule]):
# if generating a reaction containing a Molecule with a reactive=False flag (e.g., for degeneracy
# calculations), that was now converted into a Species, first mark as reactive=True
new_item.molecule[0].reactive = True
new_item.generate_resonance_structures(keep_isomorphic=keep_isomorphic)
input_list[index] = new_item
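# Hedged usage sketch (SMILES-based construction is illustrative):
#   items = [Molecule(smiles='C'), Species(molecule=[Molecule(smiles='[OH]')])]
#   ensure_species(items, resonance=True)
#   assert all(isinstance(item, Species) for item in items)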
def generate_molecule_combos(input_species):
"""
Generate combinations of molecules from the given species objects.
"""
if len(input_species) == 1:
combos = [(mol,) for mol in input_species[0].molecule]
elif len(input_species) == 2:
combos = itertools.product(input_species[0].molecule, input_species[1].molecule)
elif len(input_species) == 3:
combos = itertools.product(input_species[0].molecule, input_species[1].molecule, input_species[2].molecule)
else:
raise ValueError('Reaction generation can be done for 1, 2, or 3 species, not {0}.'.format(len(input_species)))
return combos
def ensure_independent_atom_ids(input_species, resonance=True):
"""
Given a list or tuple of :class:`Species` or :class:`Molecule` objects,
ensure that atom ids are independent.
The `resonance` argument can be set to False to not generate
resonance structures.
Modifies the list in place (replacing :class:`Molecule` with :class:`Species`).
Returns None.
"""
ensure_species(input_species) # do not generate resonance structures since we do so below
# Method to check that all species' atom ids are different
def independent_ids():
num_atoms = 0
ids = []
for spcs in input_species:
num_atoms += len(spcs.molecule[0].atoms)
ids.extend([atom.id for atom in spcs.molecule[0].atoms])
num_id = len(set(ids))
return num_id == num_atoms
# If they are not all different, reassign ids and remake resonance structures
if not independent_ids():
for species in input_species:
unreactive_mol_list = [mol for mol in species.molecule if not mol.reactive]
mol = [mol for mol in species.molecule if mol.reactive][0] # Choose first reactive molecule
mol.assign_atom_ids()
species.molecule = [mol]
# Remake resonance structures with new labels
if resonance:
species.generate_resonance_structures(keep_isomorphic=True)
if len(unreactive_mol_list):
species.molecule.extend(unreactive_mol_list)
elif resonance:
# IDs are already independent, generate resonance structures if needed
for species in input_species:
species.generate_resonance_structures(keep_isomorphic=True)
def find_degenerate_reactions(rxn_list, same_reactants=None, template=None, kinetics_database=None,
kinetics_family=None):
"""
Given a list of Reaction objects, this method combines degenerate
reactions and increments the reaction degeneracy value. For multiple
transition states, this method keeps them as duplicate reactions.
If a template is specified, then the reaction list will be filtered
to leave only reactions which match the specified template, then the
degeneracy will be calculated as usual.
A KineticsDatabase or KineticsFamily instance can also be provided to
calculate the degeneracy for reactions generated in the reverse direction.
If not provided, then it will be retrieved from the global database.
This algorithm used to exist in family._generate_reactions, but was moved
here so it could operate across reaction families.
This method returns an updated list with degenerate reactions removed.
Args:
rxn_list (list): reactions to be analyzed
same_reactants (bool, optional): indicate whether the reactants are identical
template (list, optional): specify a specific template to filter by
kinetics_database (KineticsDatabase, optional): provide a KineticsDatabase instance for calculating degeneracy
kinetics_family (KineticsFamily, optional): provide a KineticsFamily instance for calculating degeneracy
Returns:
Reaction list with degenerate reactions combined with proper degeneracy values
"""
# If a specific reaction template is requested, filter by that template
if template is not None:
selected_rxns = []
template = frozenset(template)
for rxn in rxn_list:
if template == frozenset(rxn.template):
selected_rxns.append(rxn)
if not selected_rxns:
# Only log a warning here. If a non-empty output is expected, then the caller should raise an exception
logging.warning('No reactions matched the specified template, {0}'.format(template))
return []
else:
selected_rxns = rxn_list
# We want to sort all the reactions into sublists composed of isomorphic reactions
# with degenerate transition states
sorted_rxns = []
for rxn0 in selected_rxns:
rxn0.ensure_species()
if len(sorted_rxns) == 0:
# This is the first reaction, so create a new sublist
sorted_rxns.append([rxn0])
else:
# Loop through each sublist, which represents a unique reaction
for sub_list in sorted_rxns:
# Try to determine if the current rxn0 is identical or isomorphic to any reactions in the sublist
isomorphic = False
identical = False
same_template = True
for rxn in sub_list:
isomorphic = rxn0.is_isomorphic(rxn, check_identical=False, strict=False,
check_template_rxn_products=True)
if isomorphic:
identical = rxn0.is_isomorphic(rxn, check_identical=True, strict=False,
check_template_rxn_products=True)
if identical:
# An exact copy of rxn0 is already in our list, so we can move on
break
same_template = frozenset(rxn.template) == frozenset(rxn0.template)
else:
# This sublist contains a different product
break
# Process the reaction depending on the results of the comparisons
if identical:
# This reaction does not contribute to degeneracy
break
elif isomorphic:
if same_template:
# We found the right sublist, and there is no identical reaction
# We should add rxn0 to the sublist as a degenerate rxn, and move on to the next rxn
sub_list.append(rxn0)
break
else:
# We found an isomorphic sublist, but the reaction templates are different
# We need to mark this as a duplicate and continue searching the remaining sublists
rxn0.duplicate = True
sub_list[0].duplicate = True
continue
else:
# This is not an isomorphic sublist, so we need to continue searching the remaining sublists
# Note: This else statement is not technically necessary but is included for clarity
continue
else:
# We did not break, which means that there was no isomorphic sublist, so create a new one
sorted_rxns.append([rxn0])
rxn_list = []
for sub_list in sorted_rxns:
# Collapse our sorted reaction list by taking one reaction from each sublist
rxn = sub_list[0]
# The degeneracy of each reaction is the number of reactions that were in the sublist
rxn.degeneracy = sum([reaction0.degeneracy for reaction0 in sub_list])
rxn_list.append(rxn)
for rxn in rxn_list:
if rxn.is_forward:
reduce_same_reactant_degeneracy(rxn, same_reactants)
else:
# fix the degeneracy of (not ownReverse) reactions found in the backwards direction
try:
family = kinetics_family or kinetics_database.families[rxn.family]
except AttributeError:
from rmgpy.data.rmg import get_db
family = get_db('kinetics').families[rxn.family]
if not family.own_reverse:
rxn.degeneracy = family.calculate_degeneracy(rxn)
return rxn_list
def reduce_same_reactant_degeneracy(reaction, same_reactants=None):
"""
    This method reduces the degeneracy of reactions with identical reactants,
    since the translational component of the transition state is already taken
    into account (so swapping identical reactants does not yield a new reaction).
same_reactants can be None or an integer. If it is None, then isomorphism
checks will be done to determine if the reactions are the same. If it is an
integer, that integer denotes the number of reactants that are isomorphic.
This comes from work by Bishop and Laidler in 1965
"""
if not (same_reactants == 0 or same_reactants == 1):
if len(reaction.reactants) == 2:
if ((reaction.is_forward and same_reactants == 2) or
reaction.reactants[0].is_isomorphic(reaction.reactants[1])):
reaction.degeneracy *= 0.5
logging.debug(
'Degeneracy of reaction {} was decreased by 50% to {} since the reactants are identical'.format(
reaction, reaction.degeneracy)
)
elif len(reaction.reactants) == 3:
if reaction.is_forward:
if same_reactants == 3:
reaction.degeneracy /= 6.0
logging.debug(
'Degeneracy of reaction {} was divided by 6 to give {} since all of the reactants '
'are identical'.format(reaction, reaction.degeneracy)
)
elif same_reactants == 2:
reaction.degeneracy *= 0.5
logging.debug(
'Degeneracy of reaction {} was decreased by 50% to {} since two of the reactants '
'are identical'.format(reaction, reaction.degeneracy)
)
else:
same_01 = reaction.reactants[0].is_isomorphic(reaction.reactants[1])
same_02 = reaction.reactants[0].is_isomorphic(reaction.reactants[2])
if same_01 and same_02:
reaction.degeneracy /= 6.0
logging.debug(
'Degeneracy of reaction {} was divided by 6 to give {} since all of the reactants '
'are identical'.format(reaction, reaction.degeneracy)
)
elif same_01 or same_02:
reaction.degeneracy *= 0.5
logging.debug(
'Degeneracy of reaction {} was decreased by 50% to {} since two of the reactants '
'are identical'.format(reaction, reaction.degeneracy)
)
elif reaction.reactants[1].is_isomorphic(reaction.reactants[2]):
reaction.degeneracy *= 0.5
logging.debug(
'Degeneracy of reaction {} was decreased by 50% to {} since two of the reactants '
'are identical'.format(reaction, reaction.degeneracy)
)
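# Hedged sketch of the reduction (construction is illustrative and assumes a
# forward reaction, i.e. rxn.is_forward is True):
#   rxn has two identical reactants and degeneracy 2
#   reduce_same_reactant_degeneracy(rxn, same_reactants=2)
#   rxn.degeneracy is now 1.0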
| 48.339241
| 119
| 0.591285
|
0b172c693d737b1262a401fef9c7b8f60c31f1db
| 1,222
|
py
|
Python
|
examples/scripts/ephemeris/create_ephemeris_object.py
|
fossabot/pyaurorax
|
cb3e72a90f3107302d4f9fd4b0478fe98616354d
|
[
"MIT"
] | null | null | null |
examples/scripts/ephemeris/create_ephemeris_object.py
|
fossabot/pyaurorax
|
cb3e72a90f3107302d4f9fd4b0478fe98616354d
|
[
"MIT"
] | 45
|
2021-11-07T22:02:23.000Z
|
2022-03-09T03:04:27.000Z
|
examples/scripts/ephemeris/create_ephemeris_object.py
|
fossabot/pyaurorax
|
cb3e72a90f3107302d4f9fd4b0478fe98616354d
|
[
"MIT"
] | 1
|
2022-01-16T17:28:14.000Z
|
2022-01-16T17:28:14.000Z
|
import pyaurorax
import datetime
def main():
# set values
program = "themis"
platform = "themisa"
instrument_type = "footprint"
epoch = datetime.datetime(2020, 1, 1, 0, 0)
location_geo = pyaurorax.Location(lat=51.049999, lon=-114.066666)
location_gsm = pyaurorax.Location(lat=150.25, lon=-10.75)
nbtrace = pyaurorax.Location(lat=1.23, lon=45.6)
sbtrace = pyaurorax.Location(lat=7.89, lon=101.23)
metadata = {}
# get identifier
ds = pyaurorax.sources.get(program=program,
platform=platform,
instrument_type=instrument_type,
format=pyaurorax.FORMAT_IDENTIFIER_ONLY)
# create Ephemeris object
e = pyaurorax.ephemeris.Ephemeris(data_source=ds,
epoch=epoch,
location_geo=location_geo,
location_gsm=location_gsm,
nbtrace=nbtrace,
sbtrace=sbtrace,
metadata=metadata)
# print
print(e)
# ----------
if (__name__ == "__main__"):
main()
| 31.333333
| 71
| 0.517185
|
6e8f58ac46fa19cd45495edd0db4cab9816e67bc
| 3,436
|
py
|
Python
|
VariableDictionnary.py
|
Didou09/GEMMES-1
|
4b95e53bedff47da8bc063308102982708b7e6af
|
[
"MIT"
] | null | null | null |
VariableDictionnary.py
|
Didou09/GEMMES-1
|
4b95e53bedff47da8bc063308102982708b7e6af
|
[
"MIT"
] | null | null | null |
VariableDictionnary.py
|
Didou09/GEMMES-1
|
4b95e53bedff47da8bc063308102982708b7e6af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def VariableDictionnary(result):
"""
    Contains additional information for each variable.
    VarDictype : name, unit, and intensive/extensive type of each variable
    OrganizedVar : lists of variables grouped by shared properties
"""
VarDictype = {
### EXTENSIVE VARIBLES
'Y' : {'name':'Output',
'type':'extensive',
'unit':'technological unit'},
'Pi': {'name':'$\Pi$ absolute profit',
'type':'extensive',
'unit':'technological unit'},
'W' : {'name':'Salary',
'type':'extensive',
'unit':'Money per human'},
'L' : {'name':'Labor',
'type':'extensive',
'unit':'Humans'},
'D' : {'name':'Total debt',
'type':'extensive',
'unit':'Money'},
'N' : {'name':'Population',
'type':'extensive',
'unit':'Humans'},
'P' : {'name':'Price',
'type':'extensive',
'unit':'Money'},
'K' : {'name':'Kapital',
'type':'extensive',
'unit':'technological unit'},
'I' : {'name':'Investment',
'type':'extensive',
'unit':'technological unit'},
'a' : {'name':'productivity',
'type':'intensive',
'unit':'Technological unit per human'},
### Typical rate of time
'philips' : {'name':'Wage inflation rate',
'type':'extensive',
'unit':'$t^{-1}$'},
'g' : {'name':'Output growth',
'type':'extensive',
'unit':'$t^{-1}$'},
'i' : {'name':'inflation',
'type':'extensive',
'unit':'$t^{-1}$'},
### INTENSIVE VARIABLES
'lambda': {'name':'employement rate',
'type':'intensive',
'unit':'no'},
'omega' : {'name':'wage share',
'type':'intensive',
'unit':'no'},
'pi' : {'name':'relative profit',
'type':'intensive',
'unit':'no'},
'kappa' : {'name':'relative investment to GDP',
'type':'intensive',
'unit':'no'},
'd' : {'name':'Relative debt',
'type':'intensive',
'unit':'no'},
}
Result_keys = result.keys()
VarDicType_keys = list(VarDictype.keys())
for key in VarDicType_keys:
if key not in Result_keys:
del VarDictype[key]
OrganizedVar = {
        'intensive' : [f for f in VarDictype if VarDictype[f]['type']=='intensive'],
        'extensive' : [f for f in VarDictype if VarDictype[f]['type']=='extensive'],
'rate' : [f for f in VarDictype if VarDictype[f]['unit']=='$t^{-1}$'],
'Money' : [f for f in VarDictype if VarDictype[f]['unit']=='Money'],
'Technological unit' : [f for f in VarDictype if VarDictype[f]['unit']=='technological unit'],
'Humans' : [f for f in VarDictype if VarDictype[f]['unit']=='Humans']
}
return VarDictype,OrganizedVar
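if __name__ == '__main__':
    # Hedged demo (the result keys are placeholders; only their presence matters):
    demo_result = {'Y': [], 'omega': [], 'd': []}
    VarDictype, OrganizedVar = VariableDictionnary(demo_result)
    print(OrganizedVar['intensive'])  # expected: ['omega', 'd']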
| 38.177778
| 103
| 0.422584
|
386fa6a7a7dd25535588b2da920792e2dba6c3ff
| 1,950
|
py
|
Python
|
manage.py
|
Keegan-Evans/data302
|
b4e94b8c21953ec8d71089eab9c3284ce67437ce
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
Keegan-Evans/data302
|
b4e94b8c21953ec8d71089eab9c3284ce67437ce
|
[
"BSD-3-Clause"
] | 10
|
2019-10-25T19:10:14.000Z
|
2021-04-28T17:19:50.000Z
|
manage.py
|
Keegan-Evans/data302
|
b4e94b8c21953ec8d71089eab9c3284ce67437ce
|
[
"BSD-3-Clause"
] | 5
|
2020-01-16T15:37:00.000Z
|
2021-04-28T17:20:35.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import sys
from urllib.parse import urlparse
from pathlib import Path
from flask_script import Manager
import requests
from urls import MAP
from app import create_app
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
def _template(k, v):
return "{} =>\n {}".format(k, v)
def _validate_url(url):
# This is far from perfect, but should work fine for reasonable URLs
parsed = urlparse(url)
return bool(parsed.scheme or parsed.netloc)
@manager.command
def list():
for key, val in MAP.items():
print(_template(key, val))
@manager.command
def validate():
errors = []
for key, val in MAP.items():
f_key = 'https://data.qiime2.org/%s' % key
if not _validate_url(f_key) or not _validate_url(val):
errors.append(_template(key, val))
if errors:
sys.exit('\n'.join(errors))
@manager.command
def clone_remote_files(path):
root = Path(path)
for key, url in MAP.items():
base = Path(key)
p = root / base
if str(base.parents[1]) == 'distro':
p.parent.mkdir(parents=True, exist_ok=True)
continue
p.parent.mkdir(parents=True, exist_ok=True)
if p.name in ('sample_metadata', 'sample_metadata.tsv'):
continue
print("Fetching %s" % p)
r = requests.get(url, stream=True)
with p.open('wb') as f:
for stream_chunk in r.iter_content(chunk_size=1024):
if stream_chunk:
f.write(stream_chunk)
if __name__ == '__main__':
manager.run()
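# Hedged CLI sketch (flask_script dispatches on the command names above):
#   python manage.py list
#   python manage.py validate
#   python manage.py clone_remote_files /tmp/data-mirror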
| 26
| 78
| 0.585641
|
f77aec1e6efce15ea7ba1ac3c332ebfdd2580718
| 368
|
py
|
Python
|
config.py
|
tweetbrain/twitter-test
|
7f558ec966baa4f6f7c82cbf78fa1e219565587d
|
[
"MIT"
] | null | null | null |
config.py
|
tweetbrain/twitter-test
|
7f558ec966baa4f6f7c82cbf78fa1e219565587d
|
[
"MIT"
] | null | null | null |
config.py
|
tweetbrain/twitter-test
|
7f558ec966baa4f6f7c82cbf78fa1e219565587d
|
[
"MIT"
] | null | null | null |
import json
import os
try:
with open('secrets.json', 'r') as secrets_file:
secrets = json.load(secrets_file)
except FileNotFoundError:
secrets = os.environ
class Config:
CONSUMER_KEY = secrets["CONSUMER_KEY"]
CONSUMER_SECRET = secrets["CONSUMER_SECRET"]
ACCESS_TOKEN = secrets["ACCESS_TOKEN"]
ACCESS_SECRET = secrets["ACCESS_SECRET"]
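# Hedged sketch of the expected secrets.json shape (values are placeholders):
# {
#     "CONSUMER_KEY": "...",
#     "CONSUMER_SECRET": "...",
#     "ACCESS_TOKEN": "...",
#     "ACCESS_SECRET": "..."
# }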
| 24.533333
| 51
| 0.717391
|
16aefd95b39bbd44f9390655069444b7ae4c6dda
| 516
|
py
|
Python
|
player/Player.py
|
TvSeriesFans/CineMonster
|
036a3223618afd536932d21b0e86d18d0fba3b28
|
[
"Apache-2.0"
] | 15
|
2017-09-17T17:52:43.000Z
|
2020-08-31T15:41:12.000Z
|
player/Player.py
|
TvSeriesFans/CineMonster
|
036a3223618afd536932d21b0e86d18d0fba3b28
|
[
"Apache-2.0"
] | 13
|
2017-03-14T13:24:14.000Z
|
2021-08-20T13:52:54.000Z
|
player/Player.py
|
TvSeriesFans/CineMonster
|
036a3223618afd536932d21b0e86d18d0fba3b28
|
[
"Apache-2.0"
] | 27
|
2017-07-01T18:33:49.000Z
|
2021-08-05T09:13:18.000Z
|
# -------------------------------------------
# CineMonster - A Quiz Bot for Telegram About Series and Movies
# @author: Edward "Toy" Facundo
# @site: http://edwardfacundo.wordpress.com
# -------------------------------------------
class Player:
    def __init__(self, uid):
        self.id = uid
        self.points = 0
        self.name = ''
def get_points(self):
return self.points
def set_name(self, name):
self.name = name
def add_points(self, points):
self.points += points
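if __name__ == '__main__':
    # Minimal usage sketch:
    p = Player('u42')
    p.set_name('Ana')
    p.add_points(10)
    assert p.get_points() == 10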
| 21.5
| 63
| 0.501938
|
6d7743f5947c0111bdf1729f506de4c8be68e88a
| 6,702
|
py
|
Python
|
core/model/.ipynb_checkpoints/net1-checkpoint.py
|
yoohyewony/mcan
|
5e6fa78ca76d05fa936d473d3cf7ca9c563a354e
|
[
"Apache-2.0"
] | null | null | null |
core/model/.ipynb_checkpoints/net1-checkpoint.py
|
yoohyewony/mcan
|
5e6fa78ca76d05fa936d473d3cf7ca9c563a354e
|
[
"Apache-2.0"
] | null | null | null |
core/model/.ipynb_checkpoints/net1-checkpoint.py
|
yoohyewony/mcan
|
5e6fa78ca76d05fa936d473d3cf7ca9c563a354e
|
[
"Apache-2.0"
] | null | null | null |
# --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuhao Cui https://github.com/cuiyuhao1996
# --------------------------------------------------------
from core.model.net_utils import FC, MLP, LayerNorm
from core.model.mca import MCA_ED, MHAtt, FFN
import torch.nn as nn
import torch.nn.functional as F
import torch
from .utils import grad_mul_const
# ------------------------------
# ---- Flatten the sequence ----
# ------------------------------
class AttFlat(nn.Module):
def __init__(self, __C):
super(AttFlat, self).__init__()
self.__C = __C
self.mlp = MLP(
in_size=__C.HIDDEN_SIZE, # 512
mid_size=__C.FLAT_MLP_SIZE, # 512
out_size=__C.FLAT_GLIMPSES, # 1
dropout_r=__C.DROPOUT_R,
use_relu=True
)
self.linear_merge = nn.Linear(
__C.HIDDEN_SIZE * __C.FLAT_GLIMPSES, # 512 * 1
__C.FLAT_OUT_SIZE # 1024
)
def forward(self, x, x_mask):
att = self.mlp(x)
att = att.masked_fill(
x_mask.squeeze(1).squeeze(1).unsqueeze(2),
-1e9
)
att = F.softmax(att, dim=1)
att_list = []
for i in range(self.__C.FLAT_GLIMPSES):
att_list.append(
torch.sum(att[:, :, i: i + 1] * x, dim=1)
)
x_atted = torch.cat(att_list, dim=1)
x_atted = self.linear_merge(x_atted)
return x_atted
# -------------------------
# ---- Main MCAN Model ----
# -------------------------
class Net(nn.Module):
def __init__(self, __C, pretrained_emb, token_size, answer_size):
super(Net, self).__init__()
self.embedding = nn.Embedding(
num_embeddings=token_size,
embedding_dim=__C.WORD_EMBED_SIZE # 300
)
# Loading the GloVe embedding weights
if __C.USE_GLOVE:
self.embedding.weight.data.copy_(torch.from_numpy(pretrained_emb))
self.lstm = nn.LSTM(
input_size=__C.WORD_EMBED_SIZE,
hidden_size=__C.HIDDEN_SIZE,
num_layers=1,
batch_first=True
)
self.img_feat_linear = nn.Linear(
__C.IMG_FEAT_SIZE, # Faster-rcnn 2048D features
__C.HIDDEN_SIZE
)
self.backbone = MCA_ED(__C)
self.attflat_img = AttFlat(__C)
self.attflat_lang = AttFlat(__C)
self.proj_norm_img = LayerNorm(__C.FLAT_OUT_SIZE)
self.proj_norm_lang = LayerNorm(__C.FLAT_OUT_SIZE)
self.proj_img = nn.Linear(__C.FLAT_OUT_SIZE, answer_size)
self.proj_lang = nn.Linear(__C.FLAT_OUT_SIZE, answer_size)
def forward(self, img_feat, ques_ix):
# Make mask
lang_feat_mask = self.make_mask(ques_ix.unsqueeze(2))
img_feat_mask = self.make_mask(img_feat)
# Pre-process Language Feature
lang_feat = self.embedding(ques_ix)
q_embed, _ = self.lstm(lang_feat)
# Pre-process Image Feature
img_feat = self.img_feat_linear(img_feat)
# Backbone Framework
lang_feat, img_feat = self.backbone(
q_embed,
img_feat,
lang_feat_mask,
img_feat_mask
)
lang_feat = self.attflat_lang(
lang_feat,
lang_feat_mask
)
img_feat = self.attflat_img(
img_feat,
img_feat_mask
)
# Linear multimodal fusion function
img_feat = self.proj_norm_img(img_feat) # Layer Normalization
lang_feat = self.proj_norm_lang(lang_feat)
proj_feat_img = self.proj_img(img_feat)
proj_feat_lang = self.proj_lang(lang_feat)
proj_feat = torch.sigmoid(proj_feat_img + proj_feat_lang)
return proj_feat, proj_feat_img, proj_feat_lang
# Masking
def make_mask(self, feature):
return (torch.sum(
torch.abs(feature),
dim=-1
) == 0).unsqueeze(1).unsqueeze(2)
def Diff_loss(lang_out, img_out, pred, target, diff=0.1):
    q_pred = torch.argmax(lang_out.long(), dim=1)
    loss = 0
    # Accumulate over the actual batch size instead of a hard-coded 64,
    # and sum the per-sample terms instead of overwriting them.
    for i in range(pred.size(0)):
        if pred[i] == target[i]:
            p = img_out[i, pred[i]] / (img_out[i, pred[i]] + lang_out[i, pred[i]])
            if (2 * p - 1) < diff:
                loss = loss + (2 * p - 1 - diff) ** 2
        elif q_pred[i] != target[i]:
            loss = loss + (2 * F.softmax(lang_out[i], dim=0)[q_pred[i]]) ** 2
    return loss
'''
class Diff_loss(torch.autograd.Function):
def forward(ctx, lang_out, img_out, pred, target):
ctx.save_for_backward(ctx, lang_out, img_out, pred, target)
diff = 0.1
q_pred = torch.argmax(lang_out.long(),dim=1)
#img_pred = torch.argmax(img_out.long(),dim=1)
loss = 0
for i in range(64):
if (pred[i] == target[i]):
p = img_out[i, pred[i]]/(img_out[i, pred[i]] + lang_out[i, pred[i]])
if ((2*p - 1) >= diff):
loss = 0
else:
loss = (2*p - 1 - diff)**2
else:
#q_pred = torch.argmax(lang_out.long(),dim=1)
if (q_pred[i] != target[i]):
#print(lang_out[i].size())
loss = F.softmax(lang_out[i], dim=0)[q_pred[i]]**2
return loss
def backward(ctx, grad_output):
#lang_b, img_b, pred_b, target_b, diff_b = ctx.saved_tensors
lang_out, img_out, pred, target = ctx.saved_tensors
diff = 0.1
grad_input = grad_output.clone()
q_pred = torch.argmax(lang_out.long(),dim=1)
#img_pred = torch.argmax(img_out.long(),dim=1)
for i in range(64):
if (pred[i] == target[i]):
p = img_out[i, pred[i]]/(img_out[i, pred[i]] + lang_out[i, pred[i]])
if ((2*p - 1) >= diff):
grad_input = 0
else:
grad_input = 4*(1 + diff - 2*p)
else:
#q_pred = torch.argmax(lang_out.long(),dim=1)
if (q_pred[i] != target[i]):
#print(lang_out[i].size())
grad_input = 2*F.softmax(lang_out[i], dim=0)[q_pred[i]]
else:
grad_input = 0
return grad_input
'''
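# Hedged shape sketch for Net.forward (the region count and question length
# are assumptions; actual sizes come from the config):
#   img_feat: (B, n_regions, IMG_FEAT_SIZE) Faster R-CNN features
#   ques_ix:  (B, n_tokens) question token ids
#   proj_feat, proj_feat_img, proj_feat_lang = net(img_feat, ques_ix)
#   proj_feat has shape (B, answer_size), in (0, 1) after the sigmoid fusion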
| 31.464789
| 84
| 0.524918
|
a0011f8ce288bb21f7624764ccbafdf43fc4e032
| 264,397
|
py
|
Python
|
2022-02-09/frames_to_video_converter/frames_to_video_converter.py
|
GrahamAnto/cannabis-data-science
|
1d5f3085e7b2858b6791840b90335be4669268b3
|
[
"MIT"
] | 1
|
2022-03-10T12:37:02.000Z
|
2022-03-10T12:37:02.000Z
|
2022-02-09/frames_to_video_converter/frames_to_video_converter.py
|
GrahamAnto/cannabis-data-science
|
1d5f3085e7b2858b6791840b90335be4669268b3
|
[
"MIT"
] | null | null | null |
2022-02-09/frames_to_video_converter/frames_to_video_converter.py
|
GrahamAnto/cannabis-data-science
|
1d5f3085e7b2858b6791840b90335be4669268b3
|
[
"MIT"
] | null | null | null |
<script crossorigin="anonymous" defer="defer" integrity="sha512-iOOLdgqJ+9nIsmgDk8G3/VXnNT5WVKtigTTDNim7RiAGAwo5ZhHeiwPKyxnPdpHoUhusFkQOyjv/G5+mPAaY6Q==" type="application/javascript" data-module-id="./chunk-filter-input.js" data-src="https://github.githubassets.com/assets/chunk-filter-input-88e38b76.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-lJhSlHBxDYfafMGsvjfmbReBxHE64RGTSucXtcG3vTpWvu2vlw/heQjiHB+JwYpnWvcXh0Tn9oLlo90LEpUfIA==" type="application/javascript" data-module-id="./chunk-edit.js" data-src="https://github.githubassets.com/assets/chunk-edit-94985294.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-oJMW0D2NMJDWZ3hNGcsi+GGSEKcrjdyKORlra0Gv0nlZ3Pc654AfESFBwPkfcin7RTP4VIS8ze8G+csRc4Kk8w==" type="application/javascript" data-module-id="./chunk-responsive-underlinenav.js" data-src="https://github.githubassets.com/assets/chunk-responsive-underlinenav-a09316d0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-gmw7obKL/JEHWPp6zWFh+ynbXUFOidj1DN2aPiTDwP8Gair0moVuDmA340LD84A29I3ZPak19CEiumG+oIiseg==" type="application/javascript" data-module-id="./chunk-tag-input.js" data-src="https://github.githubassets.com/assets/chunk-tag-input-826c3ba1.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-AdShwMIaVrtYr+LC9cc7rsC64wQJgoCdDjLJXaD4kVhdmuld7dk748AQueuoomE4OZypdoHSPmxQ56Wge/qtig==" type="application/javascript" data-module-id="./chunk-cookies.js" data-src="https://github.githubassets.com/assets/chunk-cookies-01d4a1c0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-+4dXpYDj5COKTdic7RPF4KJt8XQDKgtbcNHUdojLqRYPO9tK4A9RFVr61WpJWW8RF1YMGBNbj8UJtTECowOjVw==" type="application/javascript" data-module-id="./chunk-async-export.js" data-src="https://github.githubassets.com/assets/chunk-async-export-fb8757a5.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-qEMigPltoBeFtlfHd0r2kjylnlwHjhf9+QfoggBvDF5uYSMtRBn1YvtRU7nwpoWRwtH/PQ/eFTNkSNKjQ1mauQ==" type="application/javascript" data-module-id="./chunk-premium-runners.js" data-src="https://github.githubassets.com/assets/chunk-premium-runners-a8432280.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-I5GWAgLNL1Ik55RAlli3NkR3h7jovmTh1NBBIZfPn47Y4ALgUdIV/bVqkysygt0Rpv/Zo/ISZQJJaFk9daQAjQ==" type="application/javascript" data-module-id="./chunk-get-repo-element.js" data-src="https://github.githubassets.com/assets/chunk-get-repo-element-23919602.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Lva0K2dttdT/1vwV7Q3JM6VfvjQkSjNACcCAoxRxiBEqUiSsYk6LI2WM3AvwZGpq3aKoTwDD5ZugA5H0uoWbOA==" type="application/javascript" data-module-id="./chunk-command-palette-item-element.js" data-src="https://github.githubassets.com/assets/chunk-command-palette-item-element-2ef6b42b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-jitxouuFY6SUcDZV5W3jhadVEIfFBfCQZxfPV3kxNnsWEBzbxMJFp0ccLb7+OlBjSs1zU/MNtuOV6T9Ay7lx4w==" type="application/javascript" data-module-id="./chunk-copy.js" data-src="https://github.githubassets.com/assets/chunk-copy-8e2b71a2.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-xhSAO0KtnFAlRqAK+mg8BPj/J334ccvnCmmjmBQBCgZcsoO9teHJSS6oAn3XOWYFsWPU2JehwG7S3OVEbLwdUg==" type="application/javascript" data-module-id="./chunk-color-modes.js" data-src="https://github.githubassets.com/assets/chunk-color-modes-c614803b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-6yI/t7GcFRajrquWdg5NqQdrvgI4rWRWcuZhNmAHH/T2aw2iB0uAco9nNYoETW3zzJ+tmnC7qqfArrHaYmk1oQ==" type="application/javascript" data-module-id="./chunk-voting.js" data-src="https://github.githubassets.com/assets/chunk-voting-eb223fb7.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-roc/0cfaua9wDuXAOgLBYymh7ketXEgdvrXH5cKsDrwrHA8RdLZyyKvncKkwMTtAXvFBYnhOfxrRYJuu5kCyCQ==" type="application/javascript" data-module-id="./chunk-spoofed-commit-warning.js" data-src="https://github.githubassets.com/assets/chunk-spoofed-commit-warning-ae873fd1.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-L96VWoPFQ2rcStzFXdzmgexD5TB56eHz/MbCvprelVTX3ZNK4c8SDHc+UTdyoETQdpHCQ1bUaG1mOS8fX8tS3g==" type="application/javascript" data-module-id="./chunk-file-filter-persistence.js" data-src="https://github.githubassets.com/assets/chunk-file-filter-persistence-2fde955a.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-HDsLJf6gAN+WDFaJneJwmIY82XkZKWqeX7tStBLRh1XM53K8vMV6JZvjq/UQXszaNVWxWcuYtgYTG6ZWo8+QSw==" type="application/javascript" data-module-id="./chunk-confetti.js" data-src="https://github.githubassets.com/assets/chunk-confetti-1c3b0b25.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-pxcoVPYf+wMELFDORaUGcDXn+h6nlJctBeC26Y8Fudx0R4gFeeFRq9F4nh2+7+TS/qtpkakrxBBfSyb20sl3pg==" type="application/javascript" data-module-id="./chunk-remote-content-element.js" data-src="https://github.githubassets.com/assets/chunk-remote-content-element-a7172854.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-jNV3X9rYbM+OIv29fcPTKqJczxgGRunhkDDAUH0GyzLq5hCzf7+wgNpad/lP7JsAn3qKzi3j0fx2YKh0lwCRug==" type="application/javascript" data-module-id="./chunk-codemirror.js" data-src="https://github.githubassets.com/assets/chunk-codemirror-8cd5775f.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-gs5g5mTDedn2nKjW/raEMmyQ6mWY79wiFNzpKOAOVaUBo/aQvaRSXYMzhIcj0JqeDbxcLTL1hIScWa4d5xHLdg==" type="application/javascript" data-module-id="./chunk-linear.js" data-src="https://github.githubassets.com/assets/chunk-linear-82ce60e6.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-QBztfKUsN1b/9rMkgSqC2ihmMMFXwkgj5S9ANmYpv8/gweoT4fiQ8JNPuUJjOf/3Ye68WjzPh3ng0nKx5RCGLQ==" type="application/javascript" data-module-id="./chunk-time.js" data-src="https://github.githubassets.com/assets/chunk-time-401ced7c.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-eciwf2HTDpjReg34mQeXAZbmrnuWPBJt5WNWOGHTxmmPw12bYb2h+v8zyczvis7LSf9LIRRbAGcB/ETbGSkKfQ==" type="application/javascript" data-module-id="./chunk-tip.js" data-src="https://github.githubassets.com/assets/chunk-tip-79c8b07f.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-kVy17IyyakYvFLZI9rEXv7Ni4dfbFfBygl1qejVCwedpibW58SzWthRzJ5hMgVu1PVtAayoagV4cN0WbYs0CaQ==" type="application/javascript" data-module-id="./chunk-format-symbol.js" data-src="https://github.githubassets.com/assets/chunk-format-symbol-915cb5ec.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Lw2AGUwZgwq0nrGFjtFhaQ/XUah8Giatq614Y5XUdiUgUh8jg1i2wcjgW3bfZwTiDlf9l4aqJ24FDwD3HLewoA==" type="application/javascript" data-module-id="./chunk-array.js" data-src="https://github.githubassets.com/assets/chunk-array-2f0d8019.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-GHMEiXdvG00Lv7Tnj+S7/TnqlcyVi41j6xapftzfnYoGWz5iYCMK5XCoDFmCmSgliaA0pmVHO356D2u3zJCicw==" type="application/javascript" data-module-id="./chunk-line.js" data-src="https://github.githubassets.com/assets/chunk-line-18730489.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-rm1qIXRm4iMgRzHKiWNxs89dEXeQEvDZZ7qiYVFDhwbPBgNGl2Thwc/fSe8jjtq9eErkecWSDrFycssBZgoxyw==" type="application/javascript" data-module-id="./chunk-pointer.js" data-src="https://github.githubassets.com/assets/chunk-pointer-ae6d6a21.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-1cqBdZXpCRV8LLTrMwY4VfNje6ononVqvBym1OaEB8i4PUyW7ptZII9zFgiJuYJ01q9LWyHkLV3QpH5Sk0rfIA==" type="application/javascript" data-module-id="./chunk-traffic.js" data-src="https://github.githubassets.com/assets/chunk-traffic-d5ca8175.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-EmcMFfBwlqjUocetONlS5pJXtIXfGdn6I2HLZtvgAf7Hnk1odXvaqvwgjVo6OcS7aA3hHG4mMN/LJUa3dJ6qYA==" type="application/javascript" data-module-id="./chunk-index.js" data-src="https://github.githubassets.com/assets/chunk-index-12670c15.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-euLLeTFpj2DeM/lF0qLZedbrh2iLKL86jMNj8ydBs0Dr24s0ZxmdYnenUo2UIVD8r3t2DgXaahNQmstHlXpmtA==" type="application/javascript" data-module-id="./chunk-extent.js" data-src="https://github.githubassets.com/assets/chunk-extent-7ae2cb79.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-MYHfk4RC1G7um5oS3Cg8p/6FekxAiWQQvmCR526sy3/Yxbe6WEgDm5D64atvKXEqPps/0VgTxmSUH4XICbv5sw==" type="application/javascript" data-module-id="./chunk-nodrag.js" data-src="https://github.githubassets.com/assets/chunk-nodrag-3181df93.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-MU7VY0mux9rnjbuPVXGG86/+7UndHVzcSyt3JvD+cxDXvvPzpQA8k/XrZHykVBw+5qUSjjjAnQ5WD/T/q5EJng==" type="application/javascript" data-module-id="./chunk-range.js" data-src="https://github.githubassets.com/assets/chunk-range-314ed563.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-wpN+s55e0JkuffVR++S87PjAhQog0M/U4+l4pD/Ps8w9yNma6Pdmeij+RTxCSdDzqjgC9knsjPpZ5+ohkRd4ww==" type="application/javascript" data-module-id="./chunk-min.js" data-src="https://github.githubassets.com/assets/chunk-min-c2937eb3.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-/AQ6MsKM/fwGtOejF9CLiYnp0UjOyFdLAFxB/5Xlt5U21nZgIbfAiGLtkcDLjqt9f0GsQF95rNpNdJAVgfdDKg==" type="application/javascript" data-module-id="./chunk-branch-from-issue-button.js" data-src="https://github.githubassets.com/assets/chunk-branch-from-issue-button-fc043a32.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-4GJz2wyWwjq7P4hyx3qSkjvnTO7RG5cWvnePVXPB+Oji6MBVugAdl7kCTKbpX8+Ae2ONvGJwFzSc9A7m1pqzXw==" type="application/javascript" data-module-id="./chunk-toast.js" data-src="https://github.githubassets.com/assets/chunk-toast-e06273db.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-miaiZ1xkDsWBUsURHOmeYtbgVKQGnm1octCo/lDXUmPzDyjtubnHULRVw1AK+sttwdwyB0+LOyhIVAWCNSGx+A==" type="application/javascript" data-module-id="./chunk-delayed-loading-element.js" data-src="https://github.githubassets.com/assets/chunk-delayed-loading-element-9a26a267.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-5hEtVXv0HlYtpvCSDdjhSPIMuBRCQtmmYPBX2cdwfQnjcYNNQ63X+qzck70pa9hTjQD6qKbWgWU3wjZ/ymyLuQ==" type="application/javascript" data-module-id="./chunk-three.module.js" data-src="https://github.githubassets.com/assets/chunk-three.module-e6112d55.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-x51kWxNaKiuBWTPZ+IX923ONOyqpFonKFirLe2zxxsIH6akLMo5eQR+aJu5XzrXE0tcaHMjk1Ko5mpJ8Kkp9Gg==" type="application/javascript" data-module-id="./chunk-notification-list-focus.js" data-src="https://github.githubassets.com/assets/chunk-notification-list-focus-c79d645b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Mma9eU389bpaBMusL0AOdyVXlum506TbS103AlnuSSzmGYEPNtSmgFlSPx5S5wxJmj1Wc1NGtCDvs71md1bapQ==" type="application/javascript" data-module-id="./chunk-invitations.js" data-src="https://github.githubassets.com/assets/chunk-invitations-3266bd79.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-vFR+IqThljOLrAWmjhOL/kiQrjgZZg95uPovX0J7kRH5p7Y049LDRZaXLMDijfeqqk71d3MMn9XP5bUcH+lB9w==" type="application/javascript" data-module-id="./chunk-profile.js" data-src="https://github.githubassets.com/assets/chunk-profile-bc547e22.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-XPZ/UKr7xoeMUy/qNP/Xtz75lOQSAx79MciLVEyMPraKbo/fyGj/3UG5oXlF71WkZyOFVYl2L/beXCv9gCL9jw==" type="application/javascript" data-module-id="./chunk-overview.js" data-src="https://github.githubassets.com/assets/chunk-overview-5cf67f50.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-8lFeMPD4akYYJTK4foKlQ+lH3gdJwMFML7I5NJfsZCGouNEBtSLdHH6ZYgBZtGwfdu+ixG2RfUUZ1veqWKJYmw==" type="application/javascript" data-module-id="./chunk-advanced.js" data-src="https://github.githubassets.com/assets/chunk-advanced-f2515e30.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-vaGnAx5Fp/lV6x+tWNtSKS8H0hTaiXw2b4N16r0CYjAQ6Gcjl1BOWqWgmPPisyYO4drrI8Qz9rWJCWYTuOchjw==" type="application/javascript" data-module-id="./chunk-runner-groups.js" data-src="https://github.githubassets.com/assets/chunk-runner-groups-bda1a703.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-LIMrb+shvh29E7skwR+eSpxTd9oC6r3fs2ecZvSIVVMQhLYRNZBWcyT74n+f9fyXIY9pVC73SNzQ3x3iMsbsnw==" type="application/javascript" data-module-id="./chunk-yaml-linters.js" data-src="https://github.githubassets.com/assets/chunk-yaml-linters-2c832b6f.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-5H5N/3G/20nmVKntphXb9z0H9q3URFDmHSccLhFkMSA8ILAA9mYlRKCWAWoDcl/W437jtGw1tIxjWStfInvidw==" type="application/javascript" data-module-id="./chunk-profile-pins-element.js" data-src="https://github.githubassets.com/assets/chunk-profile-pins-element-e47e4dff.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-6WJL+zyYirKxwD8MNBenuxbMKvCeskXBrXISNlqhV3kltmI8kiSjUX0nDQM3fXeSakcll12sYS8Pli1GFPtG9Q==" type="application/javascript" data-module-id="./chunk-emoji-picker-element.js" data-src="https://github.githubassets.com/assets/chunk-emoji-picker-element-e9624bfb.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-EvJ2Fip59DXgARNuwTWgjdVqoCjhXQL73SP9yexijlWStKq92sfbKeGK5R4wIP0QOr39WsnW/Kaw3Wpl1QPfog==" type="application/javascript" data-module-id="./chunk-edit-hook-secret-element.js" data-src="https://github.githubassets.com/assets/chunk-edit-hook-secret-element-12f27616.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-MRVWeVjChpbblkQe6NzQxxNyRFM0HzPZGIMo4k6lcVCfKwL7lxwbpNCbSBGBkhaTwDRnwsyGseOmdotAEDRrVg==" type="application/javascript" data-module-id="./chunk-insights-query.js" data-src="https://github.githubassets.com/assets/chunk-insights-query-31155679.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-1qv2NZ7xA2F/hwAq4HdNLvjmBHM6neha9OUP9IqXA/VgxKvsbg/40HJN1sBXKZ6ufTb0/fokBXJHla8YCHAbTQ==" type="application/javascript" data-module-id="./chunk-remote-clipboard-copy.js" data-src="https://github.githubassets.com/assets/chunk-remote-clipboard-copy-d6abf635.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-YT1sZoBexRNsvw2mnXAtdf87/5j3cL8ji/WS6h9F0mMh0wyUEx3EElel6roRMoI2Zq+bn5d1i8TSCeZJ84a6Rw==" type="application/javascript" data-module-id="./chunk-series-table.js" data-src="https://github.githubassets.com/assets/chunk-series-table-613d6c66.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-nvcxK7gYOymLYhW1CFCo1ekOD8GNPcwrDtxHGekd0M9YoLTGJxWsnsyHCpibckpGOBp+nGSI1uDsMBWjpwAzWA==" type="application/javascript" data-module-id="./chunk-line-chart.js" data-src="https://github.githubassets.com/assets/chunk-line-chart-9ef7312b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-2sIAqde8jcuqT8BVIBrNAiyVNGg+Op6JerDPrC4KUHs0zofkc5HDi98NgSymGSpZ4ZnAkhx8ZUNbmRmqAwMdrA==" type="application/javascript" data-module-id="./chunk-bar-chart.js" data-src="https://github.githubassets.com/assets/chunk-bar-chart-dac200a9.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-87pDIYns8Irb6MYjA2VaLhA3OWnSUvmabp5clh/gfvkropBJwL2bugqc4ICvqbRnjRQz3mAT6qvB9GDRSZ5k4A==" type="application/javascript" data-module-id="./chunk-stacked-area-chart.js" data-src="https://github.githubassets.com/assets/chunk-stacked-area-chart-f3ba4321.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-i15/Lscj9OK0WLprCYM07xLLnbLlzFVRUO10KGu3mRzVzjVox6vk4NZn6gtexLyeeG0t/FPAg+JY44Xhl8RwPQ==" type="application/javascript" data-module-id="./chunk-presence-avatars.js" data-src="https://github.githubassets.com/assets/chunk-presence-avatars-8b5e7f2e.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-EGc+csqoKPUwL2pzhE4bnuHalJ6aQK6GMl0LL0NapYlKYgOVu+DU5Dk20KlAn1o44j2xFbgbnr6+zuSri1Yabg==" type="application/javascript" data-module-id="./chunk-pulse-authors-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-pulse-authors-graph-element-10673e72.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-tBLBnjccTx7kDAaE0oWKjTipmfQMUs2g9gLYvG4Y5UTJP0vmE+PB3GYbMXkHEUPlYHMpAHIp5NpLhXy68qCcSg==" type="application/javascript" data-module-id="./chunk-band.js" data-src="https://github.githubassets.com/assets/chunk-band-b412c19e.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Z5vxYqlrzgKkUgwWnpxHQ7mGgyY4QVo6rX+t+/+TEiFrVv4hH0bIN7cl/YvFJ5FVrLz/LFu7mhSnEWByM5sJXw==" type="application/javascript" data-module-id="./chunk-stacks-input-config-view.js" data-src="https://github.githubassets.com/assets/chunk-stacks-input-config-view-679bf162.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-6n5CAU+hn/JxTZ2JHp4WRfFdZzz3Ucc0KkXPDbSLYkZo7gJtpa/7CSFcIpNk5rXBWDRjH2PQXkNZyxYkPJfYiw==" type="application/javascript" data-module-id="./chunk-community-contributions.js" data-src="https://github.githubassets.com/assets/chunk-community-contributions-ea7e4201.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-nRr9tCW+A7HCZ7Fa3Ph4LGXO2HuUhAL+Q/QDj2r4KSdTt5ITBq1tUsfu4tlhVozGKpuAgZhBoYzCifaq501QBw==" type="application/javascript" data-module-id="./chunk-discussion-page-views.js" data-src="https://github.githubassets.com/assets/chunk-discussion-page-views-9d1afdb4.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-sYHmS7v+XI7Xevj7YY+iw36agJCINglbleSlaxlerVR60maGx80bUp8hN0ewOvB+X31FojqjG20AGqWDuE6OPQ==" type="application/javascript" data-module-id="./chunk-discussions-daily-contributors.js" data-src="https://github.githubassets.com/assets/chunk-discussions-daily-contributors-b181e64b.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-Akavlvu0VtbgOhne+qHvPFB+zryy4OPPiNUibRBqLUpXTTBHjqaZLiU2PvB2XAD7Vi1jStGlSO8gxZEVlraaTA==" type="application/javascript" data-module-id="./chunk-discussions-new-contributors.js" data-src="https://github.githubassets.com/assets/chunk-discussions-new-contributors-0246af96.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-eEVEnzhsfw53A/Zgq30yCOQoNt9rL/K0K4YhX1iM77ldszTVaOexc+s1nMLFRr6wp/AwHQ7i7OvNKtT7rFZ9/A==" type="application/javascript" data-module-id="./chunk-code-frequency-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-code-frequency-graph-element-7845449f.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-DR/O+c2yX9jstdyxwVDlaM8zRuOACj/aap9ABecyw8toE3a9rXjQgytQlFPQu74JufIDXrD3E1xAOX+ZscWG1g==" type="application/javascript" data-module-id="./chunk-area.js" data-src="https://github.githubassets.com/assets/chunk-area-0d1fcef9.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-HefNUrgcprJYnZFaaw9zBu/5gbF3wP1rixNOdaFMsFiW369pWdJY6YoTOD2iVrMQoeakwq5w9mIFtTlT3OxPtw==" type="application/javascript" data-module-id="./chunk-contributors-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-contributors-graph-element-1de7cd52.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-YUCb5GVAF8qPEnN/WtyP9j81k8qOlMzl35cLqVFLixJgBMqQ+ZT+ELoBUx2OTqs5IrYwqq9c+gk4O0qRN3h0EQ==" type="application/javascript" data-module-id="./chunk-locale.js" data-src="https://github.githubassets.com/assets/chunk-locale-61409be4.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-qRtCwE/rzoUN+fgtZuCt+hwYlRQwFP+U673JVjPGpS9xJ/HiqQkIg1Zio3i+9ibH2fgdagg8PSeHSMfVk1GFBg==" type="application/javascript" data-module-id="./chunk-org-insights-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-org-insights-graph-element-a91b42c0.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-KYvUsn4da1p2jSxHAD73P5hIGEi09MUrWBCiJvkNBHC/Gr661CjYwEtxHNmz++FAePEhDyJYg0YZwyHryrWLow==" type="application/javascript" data-module-id="./chunk-traffic-clones-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-traffic-clones-graph-element-298bd4b2.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-EqzDNw3gyq/2VXlWO1lZ9KqDmN+gEM25REcJL3o1oPuato1rjc9SnXGRbqVEauhrr6Cc6SHysPFLRUgRKKNZYA==" type="application/javascript" data-module-id="./chunk-traffic-visitors-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-traffic-visitors-graph-element-12acc337.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-kqCMvyLSvUXo48YeAD47yCUmXy0Mjh/umQ74KTKST3xetriKO7DfCkohrfc/MIwSGxVi7Ev3dglnCn+InRDSow==" type="application/javascript" data-module-id="./chunk-commit-activity-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-commit-activity-graph-element-92a08cbf.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-I7esFvr3IOWFqcBNIUL70K+MM95nG6ayY/xxmdswMXcFWRC9f43LKsYvRZy/xx35Tjs8Lc3fDpYgrcM87ysqQg==" type="application/javascript" data-module-id="./chunk-reload-after-polling-element.js" data-src="https://github.githubassets.com/assets/chunk-reload-after-polling-element-23b7ac16.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-yG3+BphJlRF3n1Tk3jJbJAxN6DZC/2Ic5tk1/FD4v0eZ2sUAEqo0nU9C/V0QrkbUP0JgjEY4EAOiUfEZMUEJfA==" type="application/javascript" data-module-id="./chunk-package-dependencies-security-graph-element.js" data-src="https://github.githubassets.com/assets/chunk-package-dependencies-security-graph-element-c86dfe06.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-LrFD/6TEB4KoXgX7o9XDqbgp/Z9c2M05v7j4J+Pu5AoNU76hTxEerTqOBIM9cBQzvSjpCSoJV66hHqfyIeUMkg==" type="application/javascript" data-module-id="./chunk-tweetsodium.js" data-src="https://github.githubassets.com/assets/chunk-tweetsodium-2eb143ff.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-NXstHPoSxcIvEnlvY0dYPzAgOJLMkMVM1fqTCrjfC7PH6UkPQvainQYUDqJht0h+OjeZJMXPoZZ7syPUnLn8rQ==" type="application/javascript" data-module-id="./chunk-jump-to.js" data-src="https://github.githubassets.com/assets/chunk-jump-to-357b2d1c.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-R+tG3SLlxMfqFe/QsCsVkiImlyr9aDIRGs81jLQFG/8IC/WqUcHWrS4KJdn93V23uBIhL1CStpD5WOGltedi9g==" type="application/javascript" data-module-id="./chunk-turbo.es2017-esm.js" data-src="https://github.githubassets.com/assets/chunk-turbo.es2017-esm-47eb46dd.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-mLpA7cDmISDeXiqsCEbEzVF1WCiEO6weP+AP7bKfcNLYHg7ZNunX7b2yUWKPN+ByilarY1AM+uGcNH3MN5jnYQ==" type="application/javascript" data-module-id="./chunk-user-status-submit.js" data-src="https://github.githubassets.com/assets/chunk-user-status-submit-98ba40ed.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-L2jxBDS9QAN9O1qn7LqMcs0YJn/gF6xW73zSbWPRlVCEnG05dexaoJWkAG6RqALTnXLsj2GTUKnba6DATR828g==" type="application/javascript" data-module-id="./chunk-launch-code-element.js" data-src="https://github.githubassets.com/assets/chunk-launch-code-element-2f68f104.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-cvjyIYhR2ZkuFAXHYZSjPTc5wXYOdISgqbXw69CXpDXdxffXmXuzjCcGJNVk3mDNYsVH4Q9sb2UMNPFrNxxRUQ==" type="application/javascript" data-module-id="./chunk-metric-selection-element.js" data-src="https://github.githubassets.com/assets/chunk-metric-selection-element-72f8f221.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-0ho0SIKf1TsQpvFNdpkKw57DmpU8LampLRvO67Q0G+6744f/qI8uTtL96F5tOPT56dG40cwoKB8v6kYqyGipvw==" type="application/javascript" data-module-id="./chunk-severity-calculator-element.js" data-src="https://github.githubassets.com/assets/chunk-severity-calculator-element-d21a3448.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-OENoFROHye7heTY73V3e1hVoB/R9WEogs8khbU4+No5l9MQ7LL9g75sNdr1MJktV10uky5TrXZcreCoxKPVutw==" type="application/javascript" data-module-id="./chunk-command-palette-page-element.js" data-src="https://github.githubassets.com/assets/chunk-command-palette-page-element-38436815.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-CC2sHfr2HSQfHfyZVFNK/u4TUAEDxlhtdTcrW/B1p/9Wo4iD1c/4VTju8pva8VlNXVS5dEfK79zyar911dC7zw==" type="application/javascript" data-module-id="./chunk-command-palette-page-stack-element.js" data-src="https://github.githubassets.com/assets/chunk-command-palette-page-stack-element-082dac1d.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-4c1T1xmasSz69dd9MxySBvtZlnV5u3bFXJXlJLlFe57DoQJ/9FHWx8sVKFvrDjDCnmy+oEbB3Zw9FKduYh71XA==" type="application/javascript" data-module-id="./chunk-readme-toc-element.js" data-src="https://github.githubassets.com/assets/chunk-readme-toc-element-e1cd53d7.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-aGaoVKNIqNkSpelOnfn0UCDbQLW2XBUVVkOOgVZXFNDfgJgFQNMXALc0964DwIi9kYrkYQIShePOSMFo20hHkw==" type="application/javascript" data-module-id="./chunk-feature-callout-element.js" data-src="https://github.githubassets.com/assets/chunk-feature-callout-element-6866a854.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-JzgkuDECne1pSMuCeKgrahMjbTRn6ucMKHPekcNzZJU8LuTFndvIoaI11sdas7kFFEF3kvmK2iKadUx0L6P6mA==" type="application/javascript" data-module-id="./chunk-codespaces-policy-form-element.js" data-src="https://github.githubassets.com/assets/chunk-codespaces-policy-form-element-273824b8.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-s29T5r7y591Z4m7jsJtkZdSy2BSgcbUhqOZxQzmImaMYoDdL2MLvaLIvhd1lee/y4dg++F9OOLWxQ4P1cj+WwQ==" type="application/javascript" data-module-id="./chunk-action-list-element.js" data-src="https://github.githubassets.com/assets/chunk-action-list-element-b36f53e6.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-5r/m4Q9I8PFeDYmfZIE5Nmi+ua6kmrqzCpKXwr/9uKdNZe3Vcywf3vzVHxc8ggzU8ujHJqGIMTZXF/E2N+tFtg==" type="application/javascript" data-module-id="./chunk-file-filter-element.js" data-src="https://github.githubassets.com/assets/chunk-file-filter-element-e6bfe6e1.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-af75/blvTxPgJ+U/NJv1AdCatvyftFB8/C/cBLsMd5htNQuBoFQg6DHpHZhekoY/+o1hx4hqYI0pxYbAnDo0Nw==" type="application/javascript" data-module-id="./chunk-memex-project-picker-element.js" data-src="https://github.githubassets.com/assets/chunk-memex-project-picker-element-69fef9fd.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-ooYcnNLBDnMePhMvdQEQiItFZowYg4gwklGZGCrAWPW1LCxePPkzB1kr8U3Bay0NPKYEDmICeXBqqDPd8EDmqA==" type="application/javascript" data-module-id="./chunk-project-picker-element.js" data-src="https://github.githubassets.com/assets/chunk-project-picker-element-a2861c9c.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-SGxKZJPMoMDPJ4mVhReJ1NPmXxuTuePhizAUPkKphsivzZhVQstruqwewm3oUpP1yONOG8MITAodv/5iKRsEtw==" type="application/javascript" data-module-id="./chunk-sortable-behavior.js" data-src="https://github.githubassets.com/assets/chunk-sortable-behavior-486c4a64.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-T092OZtYeslxDIimT2TR4IWlO+6B9q6zenOAqRekKME3TgdR1nKinRgV0PTYZ3Y3IITjimtvjqQpN5kW11o0bw==" type="application/javascript" data-module-id="./chunk-drag-drop.js" data-src="https://github.githubassets.com/assets/chunk-drag-drop-4f4f7639.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-lVgRr0+tI0xOSqg0KRwUIXgHFNZVPIVkjok+tLfkuUNDcoM8CKRuoqbMlw2H7QQ3dfMN36XJbTy3ugG/qo8Czg==" type="application/javascript" data-module-id="./chunk-contributions-spider-graph.js" data-src="https://github.githubassets.com/assets/chunk-contributions-spider-graph-955811af.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-q34Q0/s/exmYECvsCRWCxp+22UY2cZK8MPrKv3LjI3bNU5AD3Fzv+JhN0Lgfvd2mgju3llixlQ4z2RvkTIw2oA==" type="application/javascript" data-module-id="./chunk-webgl-warp.js" data-src="https://github.githubassets.com/assets/chunk-webgl-warp-ab7e10d3.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-42F1hQV3NmnK/aikw++ctbC9ROw0br5kTb10r+K6Rp00mauyZMH5uZSH7996vSqNKUjfeacx4dxbNfxZ3ebhxQ==" type="application/javascript" src="https://github.githubassets.com/assets/repositories-e3617585.js"></script>
<script crossorigin="anonymous" defer="defer" integrity="sha512-AA1czZXYMzccGr1WH3aSH6ALnp0hsU3d8psNGvwyS0J4ANF4YnSkFr5N7jvQTTMp0nLelS/4IeCIW6qLbuA+Ag==" type="application/javascript" src="https://github.githubassets.com/assets/diffs-000d5ccd.js"></script>
<meta name="viewport" content="width=device-width">
<title>Rotten-Scripts/frames_to_video_converter.py at master · HarshCasper/Rotten-Scripts</title>
<meta name="description" content="Scripts that will make you go WOW! Contribute to HarshCasper/Rotten-Scripts development by creating an account on GitHub.">
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta name="apple-itunes-app" content="app-id=1477376905" />
<meta name="twitter:image:src" content="https://repository-images.githubusercontent.com/240786294/7c04b380-3e2f-11eb-8ec5-d27f335ffaaa" /><meta name="twitter:site" content="@github" /><meta name="twitter:card" content="summary_large_image" /><meta name="twitter:title" content="Rotten-Scripts/frames_to_video_converter.py at master · HarshCasper/Rotten-Scripts" /><meta name="twitter:description" content="Scripts that will make you go WOW! Contribute to HarshCasper/Rotten-Scripts development by creating an account on GitHub." />
<meta property="og:image" content="https://repository-images.githubusercontent.com/240786294/7c04b380-3e2f-11eb-8ec5-d27f335ffaaa" /><meta property="og:image:alt" content="Scripts that will make you go WOW! Contribute to HarshCasper/Rotten-Scripts development by creating an account on GitHub." /><meta property="og:site_name" content="GitHub" /><meta property="og:type" content="object" /><meta property="og:title" content="Rotten-Scripts/frames_to_video_converter.py at master · HarshCasper/Rotten-Scripts" /><meta property="og:url" content="https://github.com/HarshCasper/Rotten-Scripts" /><meta property="og:description" content="Scripts that will make you go WOW! Contribute to HarshCasper/Rotten-Scripts development by creating an account on GitHub." />
<link rel="assets" href="https://github.githubassets.com/">
<link rel="shared-web-socket" href="wss://alive.github.com/_sockets/u/19616734/ws?session=eyJ2IjoiVjMiLCJ1IjoxOTYxNjczNCwicyI6ODE5NzkwMTg5LCJjIjozNzU3MDc1MzIxLCJ0IjoxNjQ0MzM0MDExfQ==--929d1990a299c22990a1da9e947de679a10ef39c4c7ccc05203817944062d1c3" data-refresh-url="/_alive" data-session-id="d7a259f42024a631d5131ee05f4f075d6dd898659b29b4688af77e12ffc8b48b">
<link rel="shared-web-socket-src" href="/assets-cdn/worker/socket-worker-d162efab.js">
<link rel="sudo-modal" href="/sessions/sudo_modal">
<meta name="request-id" content="C071:21A0:4F6339:86E81F:62028B7D" data-pjax-transient="true" /><meta name="html-safe-nonce" content="2899a11eba59411001c935aaed186abeafde197a1f4f5d0444754cc2e4815a40" data-pjax-transient="true" /><meta name="visitor-payload" content="eyJyZWZlcnJlciI6Imh0dHBzOi8vZ2l0aHViLmNvbS9IYXJzaENhc3Blci9Sb3R0ZW4tU2NyaXB0cy90cmVlL21hc3Rlci9QeXRob24vRnJhbWVzX3RvX1ZpZGVvX2NvbnZlcnRlciIsInJlcXVlc3RfaWQiOiJDMDcxOjIxQTA6NEY2MzM5Ojg2RTgxRjo2MjAyOEI3RCIsInZpc2l0b3JfaWQiOiIxNDQ2OTAyMzE5NjI0ODM0MjE5IiwicmVnaW9uX2VkZ2UiOiJpYWQiLCJyZWdpb25fcmVuZGVyIjoiaWFkIn0=" data-pjax-transient="true" /><meta name="visitor-hmac" content="c892566651ea04a44b1cf9734a932d93dd97663c2589df747617a03be76ec567" data-pjax-transient="true" />
<meta name="hovercard-subject-tag" content="repository:240786294" data-pjax-transient>
<meta name="github-keyboard-shortcuts" content="repository,source-code" data-pjax-transient="true" />
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="c1kuD-K2HIVF635lypcsWPoD4kilo5-jA_wBFyT4uMY">
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="octolytics-url" content="https://collector.github.com/github/collect" /><meta name="octolytics-actor-id" content="19616734" /><meta name="octolytics-actor-login" content="keeganskeate" /><meta name="octolytics-actor-hash" content="9e29b93a1d22d08e87e0c285cd22987da5dd992083095848510c7f72ac4aada1" />
<meta name="analytics-location" content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" />
<meta name="optimizely-datafile" content="{"version": "4", "rollouts": [], "typedAudiences": [], "anonymizeIP": true, "projectId": "16737760170", "variables": [], "featureFlags": [], "experiments": [{"status": "Running", "audienceIds": [], "variations": [{"variables": [], "id": "20438636352", "key": "control"}, {"variables": [], "id": "20484957397", "key": "treatment"}], "id": "20479227424", "key": "growth_ghec_onboarding_experience", "layerId": "20467848595", "trafficAllocation": [{"entityId": "20484957397", "endOfRange": 1000}, {"entityId": "20484957397", "endOfRange": 3000}, {"entityId": "20484957397", "endOfRange": 5000}, {"entityId": "20484957397", "endOfRange": 6000}, {"entityId": "20484957397", "endOfRange": 8000}, {"entityId": "20484957397", "endOfRange": 10000}], "forcedVariations": {"85e2238ce2b9074907d7a3d91d6feeae": "control"}}, {"status": "Running", "audienceIds": [], "variations": [{"variables": [], "id": "20667381018", "key": "control"}, {"variables": [], "id": "20680930759", "key": "treatment"}], "id": "20652570897", "key": "project_genesis", "layerId": "20672300363", "trafficAllocation": [{"entityId": "20667381018", "endOfRange": 5000}, {"entityId": "20680930759", "endOfRange": 10000}], "forcedVariations": {"83356e17066d336d1803024138ecb683": "treatment", "18e31c8a9b2271332466133162a4aa0d": "treatment", "10f8ab3fbc5ebe989a36a05f79d48f32": "treatment", "1686089f6d540cd2deeaec60ee43ecf7": "treatment"}}, {"status": "Running", "audienceIds": [], "variations": [{"variables": [], "id": "20898546114", "key": "control"}, {"variables": [], "id": "20923036705", "key": "treatment_a"}, {"variables": [], "id": "20965581308", "key": "treatment_b"}], "id": "20902325119", "key": "contact_sales_page_optimizations", "layerId": "20969031091", "trafficAllocation": [{"entityId": "20965581308", "endOfRange": 3330}, {"entityId": "20898546114", "endOfRange": 5000}, {"entityId": "20898546114", "endOfRange": 6670}, {"entityId": "20923036705", "endOfRange": 10000}], "forcedVariations": {}}], "audiences": [{"conditions": "[\"or\", {\"match\": \"exact\", \"name\": \"$opt_dummy_attribute\", \"type\": \"custom_attribute\", \"value\": \"$opt_dummy_value\"}]", "id": "$opt_dummy_audience", "name": "Optimizely-Generated Audience for Backwards Compatibility"}], "groups": [], "sdkKey": "WTc6awnGuYDdG98CYRban", "environmentKey": "production", "attributes": [{"id": "16822470375", "key": "user_id"}, {"id": "17143601254", "key": "spammy"}, {"id": "18175660309", "key": "organization_plan"}, {"id": "18813001570", "key": "is_logged_in"}, {"id": "19073851829", "key": "geo"}, {"id": "20175462351", "key": "requestedCurrency"}, {"id": "20785470195", "key": "country_code"}], "botFiltering": false, "accountId": "16737760170", "events": [{"experimentIds": [], "id": "17911811441", "key": "hydro_click.dashboard.teacher_toolbox_cta"}, {"experimentIds": [], "id": "18124116703", "key": "submit.organizations.complete_sign_up"}, {"experimentIds": [], "id": "18145892387", "key": "no_metric.tracked_outside_of_optimizely"}, {"experimentIds": [], "id": "18178755568", "key": "click.org_onboarding_checklist.add_repo"}, {"experimentIds": [], "id": "18180553241", "key": "submit.repository_imports.create"}, {"experimentIds": [], "id": "18186103728", "key": "click.help.learn_more_about_repository_creation"}, {"experimentIds": [], "id": "18188530140", "key": "test_event.do_not_use_in_production"}, {"experimentIds": [], "id": "18191963644", "key": "click.empty_org_repo_cta.transfer_repository"}, {"experimentIds": [], "id": "18195612788", 
"key": "click.empty_org_repo_cta.import_repository"}, {"experimentIds": [], "id": "18210945499", "key": "click.org_onboarding_checklist.invite_members"}, {"experimentIds": [], "id": "18211063248", "key": "click.empty_org_repo_cta.create_repository"}, {"experimentIds": [], "id": "18215721889", "key": "click.org_onboarding_checklist.update_profile"}, {"experimentIds": [], "id": "18224360785", "key": "click.org_onboarding_checklist.dismiss"}, {"experimentIds": [], "id": "18234832286", "key": "submit.organization_activation.complete"}, {"experimentIds": [], "id": "18252392383", "key": "submit.org_repository.create"}, {"experimentIds": [], "id": "18257551537", "key": "submit.org_member_invitation.create"}, {"experimentIds": [], "id": "18259522260", "key": "submit.organization_profile.update"}, {"experimentIds": [], "id": "18564603625", "key": "view.classroom_select_organization"}, {"experimentIds": [], "id": "18568612016", "key": "click.classroom_sign_in_click"}, {"experimentIds": [], "id": "18572592540", "key": "view.classroom_name"}, {"experimentIds": [], "id": "18574203855", "key": "click.classroom_create_organization"}, {"experimentIds": [], "id": "18582053415", "key": "click.classroom_select_organization"}, {"experimentIds": [], "id": "18589463420", "key": "click.classroom_create_classroom"}, {"experimentIds": [], "id": "18591323364", "key": "click.classroom_create_first_classroom"}, {"experimentIds": [], "id": "18591652321", "key": "click.classroom_grant_access"}, {"experimentIds": [], "id": "18607131425", "key": "view.classroom_creation"}, {"experimentIds": ["20479227424"], "id": "18831680583", "key": "upgrade_account_plan"}, {"experimentIds": [], "id": "19064064515", "key": "click.signup"}, {"experimentIds": [], "id": "19075373687", "key": "click.view_account_billing_page"}, {"experimentIds": [], "id": "19077355841", "key": "click.dismiss_signup_prompt"}, {"experimentIds": [], "id": "19079713938", "key": "click.contact_sales"}, {"experimentIds": [], "id": "19120963070", "key": "click.compare_account_plans"}, {"experimentIds": [], "id": "19151690317", "key": "click.upgrade_account_cta"}, {"experimentIds": [], "id": "19424193129", "key": "click.open_account_switcher"}, {"experimentIds": [], "id": "19520330825", "key": "click.visit_account_profile"}, {"experimentIds": [], "id": "19540970635", "key": "click.switch_account_context"}, {"experimentIds": [], "id": "19730198868", "key": "submit.homepage_signup"}, {"experimentIds": [], "id": "19820830627", "key": "click.homepage_signup"}, {"experimentIds": [], "id": "19988571001", "key": "click.create_enterprise_trial"}, {"experimentIds": [], "id": "20036538294", "key": "click.create_organization_team"}, {"experimentIds": [], "id": "20040653299", "key": "click.input_enterprise_trial_form"}, {"experimentIds": [], "id": "20062030003", "key": "click.continue_with_team"}, {"experimentIds": [], "id": "20068947153", "key": "click.create_organization_free"}, {"experimentIds": [], "id": "20086636658", "key": "click.signup_continue.username"}, {"experimentIds": [], "id": "20091648988", "key": "click.signup_continue.create_account"}, {"experimentIds": [], "id": "20103637615", "key": "click.signup_continue.email"}, {"experimentIds": [], "id": "20111574253", "key": "click.signup_continue.password"}, {"experimentIds": [], "id": "20120044111", "key": "view.pricing_page"}, {"experimentIds": [], "id": "20152062109", "key": "submit.create_account"}, {"experimentIds": [], "id": "20165800992", "key": "submit.upgrade_payment_form"}, {"experimentIds": [], "id": 
"20171520319", "key": "submit.create_organization"}, {"experimentIds": [], "id": "20222645674", "key": "click.recommended_plan_in_signup.discuss_your_needs"}, {"experimentIds": [], "id": "20227443657", "key": "submit.verify_primary_user_email"}, {"experimentIds": [], "id": "20234607160", "key": "click.recommended_plan_in_signup.try_enterprise"}, {"experimentIds": [], "id": "20238175784", "key": "click.recommended_plan_in_signup.team"}, {"experimentIds": [], "id": "20239847212", "key": "click.recommended_plan_in_signup.continue_free"}, {"experimentIds": [], "id": "20251097193", "key": "recommended_plan"}, {"experimentIds": [], "id": "20438619534", "key": "click.pricing_calculator.1_member"}, {"experimentIds": [], "id": "20456699683", "key": "click.pricing_calculator.15_members"}, {"experimentIds": [], "id": "20467868331", "key": "click.pricing_calculator.10_members"}, {"experimentIds": [], "id": "20476267432", "key": "click.trial_days_remaining"}, {"experimentIds": ["20479227424"], "id": "20476357660", "key": "click.discover_feature"}, {"experimentIds": [], "id": "20479287901", "key": "click.pricing_calculator.custom_members"}, {"experimentIds": [], "id": "20481107083", "key": "click.recommended_plan_in_signup.apply_teacher_benefits"}, {"experimentIds": [], "id": "20483089392", "key": "click.pricing_calculator.5_members"}, {"experimentIds": ["20479227424", "20652570897"], "id": "20484283944", "key": "click.onboarding_task"}, {"experimentIds": [], "id": "20484996281", "key": "click.recommended_plan_in_signup.apply_student_benefits"}, {"experimentIds": ["20479227424"], "id": "20486713726", "key": "click.onboarding_task_breadcrumb"}, {"experimentIds": ["20479227424"], "id": "20490791319", "key": "click.upgrade_to_enterprise"}, {"experimentIds": ["20479227424"], "id": "20491786766", "key": "click.talk_to_us"}, {"experimentIds": ["20479227424"], "id": "20494144087", "key": "click.dismiss_enterprise_trial"}, {"experimentIds": ["20479227424", "20652570897"], "id": "20499722759", "key": "completed_all_tasks"}, {"experimentIds": ["20479227424", "20652570897"], "id": "20500710104", "key": "completed_onboarding_tasks"}, {"experimentIds": ["20479227424"], "id": "20513160672", "key": "click.read_doc"}, {"experimentIds": ["20652570897"], "id": "20516196762", "key": "actions_enabled"}, {"experimentIds": ["20479227424"], "id": "20518980986", "key": "click.dismiss_trial_banner"}, {"experimentIds": [], "id": "20535446721", "key": "click.issue_actions_prompt.dismiss_prompt"}, {"experimentIds": [], "id": "20557002247", "key": "click.issue_actions_prompt.setup_workflow"}, {"experimentIds": [], "id": "20595070227", "key": "click.pull_request_setup_workflow"}, {"experimentIds": [], "id": "20626600314", "key": "click.seats_input"}, {"experimentIds": [], "id": "20642310305", "key": "click.decrease_seats_number"}, {"experimentIds": [], "id": "20662990045", "key": "click.increase_seats_number"}, {"experimentIds": [], "id": "20679620969", "key": "click.public_product_roadmap"}, {"experimentIds": ["20479227424"], "id": "20761240940", "key": "click.dismiss_survey_banner"}, {"experimentIds": ["20479227424"], "id": "20767210721", "key": "click.take_survey"}, {"experimentIds": ["20652570897"], "id": "20795281201", "key": "click.archive_list"}, {"experimentIds": ["20902325119"], "id": "20966790249", "key": "contact_sales.submit"}, {"experimentIds": ["20902325119"], "id": "20996500333", "key": "contact_sales.existing_customer"}, {"experimentIds": ["20902325119"], "id": "20996890162", "key": 
"contact_sales.blank_message_field"}, {"experimentIds": ["20902325119"], "id": "21000470317", "key": "contact_sales.personal_email"}, {"experimentIds": ["20902325119"], "id": "21002790172", "key": "contact_sales.blank_phone_field"}], "revision": "1040"}" />
<!-- To prevent page flashing, the optimizely JS needs to be loaded in the
<head> tag before the DOM renders -->
<script crossorigin="anonymous" defer="defer" integrity="sha512-cOvwb4vk3pi8KxlHMCqQSKzhC6PxKFQcJl6GRAG5imHxqf3zXi7Lyw2WfEpL4T+L1xHDPhN8VpwuTK1aEKVDPA==" type="application/javascript" src="https://github.githubassets.com/assets/optimizely-70ebf06f.js"></script>
<meta name="hostname" content="github.com">
<meta name="user-login" content="keeganskeate">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="MTBlZTYyNDFjNjM0NjhlOGEwMmM0ZDczMTRkZDQ2ODNkZWY4Y2I1NjM1OWRmNDJmYWE4ZmQxZjVkMmEzNTI0ZHx7InJlbW90ZV9hZGRyZXNzIjoiOTguMTc1LjE1LjExNCIsInJlcXVlc3RfaWQiOiJDMDcxOjIxQTA6NEY2MzM5Ojg2RTgxRjo2MjAyOEI3RCIsInRpbWVzdGFtcCI6MTY0NDMzNDAxMSwiaG9zdCI6ImdpdGh1Yi5jb20ifQ==">
<meta name="keyboard-shortcuts-preference" content="all">
<script type="application/json" id="memex_keyboard_shortcuts_preference">"all"</script>
<meta name="enabled-features" content="ACTIONS_CALLABLE_WORKFLOWS,MARKETPLACE_PENDING_INSTALLATIONS,MARKDOWNIFY_PASTED_LINKS,PRESENCE_IDLE">
<meta http-equiv="x-pjax-version" content="e89a105823a5ac6e5293c1ed35776e70ad834f96dbf22de43ec6f6ebbe4ce44b" data-turbo-track="reload">
<meta http-equiv="x-pjax-csp-version" content="ad743a89372c421844ffcba4fd906096b07b7fd7c2a57617ff2d2f0fdf463e56" data-turbo-track="reload">
<meta http-equiv="x-pjax-css-version" content="6e83a88eaa4a8611b9f8448f3a1f6f91dadf4c3f7e8d1aec9473307b6cd2ef95" data-turbo-track="reload">
<meta http-equiv="x-pjax-js-version" content="c5051803147fe3f834893c2bf99d900098da91d1557f39dda72a83019acf746a" data-turbo-track="reload">
<meta name="go-import" content="github.com/HarshCasper/Rotten-Scripts git https://github.com/HarshCasper/Rotten-Scripts.git">
<meta name="octolytics-dimension-user_id" content="47351025" /><meta name="octolytics-dimension-user_login" content="HarshCasper" /><meta name="octolytics-dimension-repository_id" content="240786294" /><meta name="octolytics-dimension-repository_nwo" content="HarshCasper/Rotten-Scripts" /><meta name="octolytics-dimension-repository_public" content="true" /><meta name="octolytics-dimension-repository_is_fork" content="false" /><meta name="octolytics-dimension-repository_network_root_id" content="240786294" /><meta name="octolytics-dimension-repository_network_root_nwo" content="HarshCasper/Rotten-Scripts" />
<link rel="canonical" href="https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<meta name="browser-optimizely-client-errors-url" content="https://api.github.com/_private/browser/optimizely_client/errors">
<link rel="mask-icon" href="https://github.githubassets.com/pinned-octocat.svg" color="#000000">
<link rel="alternate icon" class="js-site-favicon" type="image/png" href="https://github.githubassets.com/favicons/favicon.png">
<link rel="icon" class="js-site-favicon" type="image/svg+xml" href="https://github.githubassets.com/favicons/favicon.svg">
<meta name="theme-color" content="#1e2327">
<meta name="color-scheme" content="dark light" />
<link rel="manifest" href="/manifest.json" crossOrigin="use-credentials">
</head>
<body class="logged-in env-production page-responsive page-blob" style="word-wrap: break-word;">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" class="p-3 color-bg-accent-emphasis color-fg-on-emphasis show-on-focus js-skip-to-content">Skip to content</a>
<span data-view-component="true" class="progress-pjax-loader js-pjax-loader-bar Progress position-fixed width-full">
<span style="width: 0%;" data-view-component="true" class="Progress-item progress-pjax-loader-bar left-0 top-0 color-bg-accent-emphasis"></span>
</span>
<script crossorigin="anonymous" defer="defer" integrity="sha512-iO4xVyBRZuh6pXaYTnOfmhiF8+9DADXgOtZlmcY+mEcXJ2c/3iHZpnpCOUkR/GXQUmBCXkb25p/2yKS7O0E7Cw==" type="application/javascript" src="https://github.githubassets.com/assets/command-palette-88ee3157.js"></script>
<header class="Header js-details-container Details px-3 px-md-4 px-lg-5 flex-wrap flex-md-nowrap" role="banner" >
<div class="Header-item mt-n1 mb-n1 d-none d-md-flex">
<a
class="Header-link "
href="https://github.com/"
data-hotkey="g d"
aria-label="Homepage "
data-hydro-click="{"event_type":"analytics.event","payload":{"category":"Header","action":"go to dashboard","label":"icon:logo","originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="c64c745a1abceda3b82265332a10e5bdc11c85d0fff6e459cc055d3d7bc23856" data-analytics-event="{"category":"Header","action":"go to dashboard","label":"icon:logo"}"
>
<svg height="32" aria-hidden="true" viewBox="0 0 16 16" version="1.1" width="32" data-view-component="true" class="octicon octicon-mark-github v-align-middle">
</svg>
</span>
<span class="Toast-content"></span>
</div>
<div hidden class="anim-fade-in fast Toast Toast--warning">
<span class="Toast-icon">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
</span>
<span class="Toast-content"></span>
</div>
<div hidden class="anim-fade-in fast Toast Toast--success">
<span class="Toast-icon">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<span class="Toast-content"></span>
</div>
<div hidden class="anim-fade-in fast Toast">
<span class="Toast-icon">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-info">
<path fill-rule="evenodd" d="M8 1.5a6.5 6.5 0 100 13 6.5 6.5 0 000-13zM0 8a8 8 0 1116 0A8 8 0 010 8zm6.5-.25A.75.75 0 017.25 7h1a.75.75 0 01.75.75v2.75h.25a.75.75 0 010 1.5h-2a.75.75 0 010-1.5h.25v-2h-.25a.75.75 0 01-.75-.75zM8 6a1 1 0 100-2 1 1 0 000 2z"></path>
</svg>
</span>
<span class="Toast-content"></span>
</div>
</div>
<div hidden class="js-command-palette-pjax-meta-data" data-pjax-replace id="command-palette-pjax-meta-data"
data-subject-id="MDEwOlJlcG9zaXRvcnkyNDA3ODYyOTQ="
data-subject-type="Repository"
>
</div>
<div
class="application-main "
data-commit-hovercards-enabled
data-discussion-hovercards-enabled
data-issue-and-pr-hovercards-enabled
>
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<main id="js-repo-pjax-container" data-pjax-container >
<div id="repository-container-header" class="pt-3 hide-full-screen mb-5" style="background-color: var(--color-page-header-bg);" data-pjax-replace>
<div class="d-flex mb-3 px-3 px-md-4 px-lg-5">
<div class="flex-auto min-width-0 width-fit mr-3">
<h1 class=" d-flex flex-wrap flex-items-center wb-break-word f3 text-normal">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-repo color-fg-muted mr-2">
<path fill-rule="evenodd" d="M2 2.5A2.5 2.5 0 014.5 0h8.75a.75.75 0 01.75.75v12.5a.75.75 0 01-.75.75h-2.5a.75.75 0 110-1.5h1.75v-2h-8a1 1 0 00-.714 1.7.75.75 0 01-1.072 1.05A2.495 2.495 0 012 11.5v-9zm10.5-1V9h-8c-.356 0-.694.074-1 .208V2.5a1 1 0 011-1h8zM5 12.25v3.25a.25.25 0 00.4.2l1.45-1.087a.25.25 0 01.3 0L8.6 15.7a.25.25 0 00.4-.2v-3.25a.25.25 0 00-.25-.25h-3.5a.25.25 0 00-.25.25z"></path>
</svg>
<span class="author flex-self-stretch" itemprop="author">
<a class="url fn" rel="author" data-hovercard-type="user" data-hovercard-url="/users/HarshCasper/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper">HarshCasper</a>
</span>
<span class="mx-1 flex-self-stretch color-fg-muted">/</span>
<strong itemprop="name" class="mr-2 flex-self-stretch">
<a data-pjax="#repo-content-pjax-container" href="/HarshCasper/Rotten-Scripts">Rotten-Scripts</a>
</strong>
<span></span><span class="Label Label--secondary v-align-middle mr-1">Public</span>
</h1>
</div>
<ul class="pagehead-actions flex-shrink-0 d-none d-md-inline" style="padding: 2px 0;">
<li>
<notifications-list-subscription-form
data-action="notifications-dialog-label-toggled:notifications-list-subscription-form#handleDialogLabelToggle"
class="f5 position-relative"
>
<details
class="details-reset details-overlay f5 position-relative"
data-target="notifications-list-subscription-form.details"
data-action="toggle:notifications-list-subscription-form#detailsToggled"
>
<summary data-hydro-click="{"event_type":"repository.click","payload":{"target":"WATCH_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="f7aa557897a9a1b3bca45f11d1c9ec68a380bee79f5bb75afc6ecd1d5f89fc47" data-ga-click="Repository, click Watch settings, action:blob#show" aria-label="Notification settings" data-view-component="true" class="btn-sm btn"> <span data-menu-button>
<span
hidden
data-target="notifications-list-subscription-form.unwatchButtonCopy"
>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-eye">
<path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path>
</svg>
Unwatch
</span>
<span
hidden
data-target="notifications-list-subscription-form.stopIgnoringButtonCopy"
>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-bell-slash">
<path fill-rule="evenodd" d="M8 1.5c-.997 0-1.895.416-2.534 1.086A.75.75 0 014.38 1.55 5 5 0 0113 5v2.373a.75.75 0 01-1.5 0V5A3.5 3.5 0 008 1.5zM4.182 4.31L1.19 2.143a.75.75 0 10-.88 1.214L3 5.305v2.642a.25.25 0 01-.042.139L1.255 10.64A1.518 1.518 0 002.518 13h11.108l1.184.857a.75.75 0 10.88-1.214l-1.375-.996a1.196 1.196 0 00-.013-.01L4.198 4.321a.733.733 0 00-.016-.011zm7.373 7.19L4.5 6.391v1.556c0 .346-.102.683-.294.97l-1.703 2.556a.018.018 0 00-.003.01.015.015 0 00.005.012.017.017 0 00.006.004l.007.001h9.037zM8 16a2 2 0 001.985-1.75c.017-.137-.097-.25-.235-.25h-3.5c-.138 0-.252.113-.235.25A2 2 0 008 16z"></path>
</svg>
Stop ignoring
</span>
<span
data-target="notifications-list-subscription-form.watchButtonCopy"
>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-eye">
<path fill-rule="evenodd" d="M1.679 7.932c.412-.621 1.242-1.75 2.366-2.717C5.175 4.242 6.527 3.5 8 3.5c1.473 0 2.824.742 3.955 1.715 1.124.967 1.954 2.096 2.366 2.717a.119.119 0 010 .136c-.412.621-1.242 1.75-2.366 2.717C10.825 11.758 9.473 12.5 8 12.5c-1.473 0-2.824-.742-3.955-1.715C2.92 9.818 2.09 8.69 1.679 8.068a.119.119 0 010-.136zM8 2c-1.981 0-3.67.992-4.933 2.078C1.797 5.169.88 6.423.43 7.1a1.619 1.619 0 000 1.798c.45.678 1.367 1.932 2.637 3.024C4.329 13.008 6.019 14 8 14c1.981 0 3.67-.992 4.933-2.078 1.27-1.091 2.187-2.345 2.637-3.023a1.619 1.619 0 000-1.798c-.45-.678-1.367-1.932-2.637-3.023C11.671 2.992 9.981 2 8 2zm0 8a2 2 0 100-4 2 2 0 000 4z"></path>
</svg>
Watch
</span>
</span>
<span id="repo-notifications-counter" data-target="notifications-list-subscription-form.socialCount" data-pjax-replace="true" title="16" data-view-component="true" class="Counter">16</span>
<span class="dropdown-caret"></span>
</summary>
<details-menu
class="SelectMenu "
role="menu"
data-target="notifications-list-subscription-form.menu"
>
<div class="SelectMenu-modal notifications-component-menu-modal">
<header class="SelectMenu-header">
<h3 class="SelectMenu-title">Notifications</h3>
<button class="SelectMenu-closeButton" type="button" aria-label="Close menu" data-action="click:notifications-list-subscription-form#closeMenu">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
</header>
<div class="SelectMenu-list">
<form data-target="notifications-list-subscription-form.form" data-action="submit:notifications-list-subscription-form#submitForm" action="/notifications/subscribe" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="Xmbxp3OELvIS3tB1cYxVdkNhmOoCRJfUD_0AuoZYJGpXMG8TiMk9pYsHoxvfY67g2vxOgSG-qPaAi8Fms0HLbA" autocomplete="off" />
<input type="hidden" name="repository_id" value="240786294">
<button
type="submit"
name="do"
value="included"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="true"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
Participating and @mentions
</div>
<div class="text-small color-fg-muted text-normal pb-1">
Only receive notifications from this repository when participating or @mentioned.
</div>
</div>
</button>
<button
type="submit"
name="do"
value="subscribed"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="false"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
All Activity
</div>
<div class="text-small color-fg-muted text-normal pb-1">
Notified of all notifications on this repository.
</div>
</div>
</button>
<button
type="submit"
name="do"
value="ignore"
class="SelectMenu-item flex-items-start"
role="menuitemradio"
aria-checked="false"
data-targets="notifications-list-subscription-form.subscriptionButtons"
>
<span class="f5">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="f5 text-bold">
Ignore
</div>
<div class="text-small color-fg-muted text-normal pb-1">
Never be notified.
</div>
</div>
</button>
</form>
<button
class="SelectMenu-item flex-items-start pr-3"
type="button"
role="menuitemradio"
data-target="notifications-list-subscription-form.customButton"
data-action="click:notifications-list-subscription-form#openCustomDialog"
aria-haspopup="true"
aria-checked="false"
>
<span class="f5">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<div>
<div class="d-flex flex-items-start flex-justify-between">
<div class="f5 text-bold">Custom</div>
<div class="f5 pr-1">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-arrow-right">
<path fill-rule="evenodd" d="M8.22 2.97a.75.75 0 011.06 0l4.25 4.25a.75.75 0 010 1.06l-4.25 4.25a.75.75 0 01-1.06-1.06l2.97-2.97H3.75a.75.75 0 010-1.5h7.44L8.22 4.03a.75.75 0 010-1.06z"></path>
</svg>
</div>
</div>
<div class="text-small color-fg-muted text-normal pb-1">
Select events you want to be notified of in addition to participating and @mentions.
</div>
</div>
</button>
<div class="px-3 py-2 d-flex color-bg-subtle flex-items-center">
<span class="f5">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-device-mobile SelectMenu-icon SelectMenu-icon--device-mobile">
<path fill-rule="evenodd" d="M3.75 0A1.75 1.75 0 002 1.75v12.5c0 .966.784 1.75 1.75 1.75h8.5A1.75 1.75 0 0014 14.25V1.75A1.75 1.75 0 0012.25 0h-8.5zM3.5 1.75a.25.25 0 01.25-.25h8.5a.25.25 0 01.25.25v12.5a.25.25 0 01-.25.25h-8.5a.25.25 0 01-.25-.25V1.75zM8 13a1 1 0 100-2 1 1 0 000 2z"></path>
</svg>
</span>
<span className="text-small color-fg-muted text-normal pb-1">
Get push notifications on <a target="_blank" rel="noopener noreferrer" href="https://apps.apple.com/app/apple-store/id1477376905?ct=watch-dropdown&mt=8&pt=524675">iOS</a> or <a target="_blank" rel="noopener noreferrer" href="https://play.google.com/store/apps/details?id=com.github.android&referrer=utm_campaign%3Dwatch-dropdown%26utm_medium%3Dweb%26utm_source%3Dgithub">Android</a>.
</span>
</div>
</div>
</div>
</details-menu>
<details-dialog
class="notifications-component-dialog "
data-target="notifications-list-subscription-form.customDialog"
aria-label="Custom dialog"
hidden
>
<div class="SelectMenu-modal notifications-component-dialog-modal overflow-visible">
<form data-target="notifications-list-subscription-form.customform" data-action="submit:notifications-list-subscription-form#submitCustomForm" action="/notifications/subscribe" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="UGAkJnPryJYCyZfrMFMku2pnY1DV_mOAvGRmJj8KveJZNrqSiKbbwZsQ5IWevN8t8_q1O_YEXKIzEqf6ChNS5A" autocomplete="off" />
<input type="hidden" name="repository_id" value="240786294">
<header class="d-sm-none SelectMenu-header pb-0 border-bottom-0 px-2 px-sm-3">
<h1 class="f3 SelectMenu-title d-inline-flex">
<button
class="color-bg-default border-0 px-2 py-0 m-0 Link--secondary f5"
aria-label="Return to menu"
type="button"
data-action="click:notifications-list-subscription-form#closeCustomDialog"
>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-arrow-left">
<path fill-rule="evenodd" d="M7.78 12.53a.75.75 0 01-1.06 0L2.47 8.28a.75.75 0 010-1.06l4.25-4.25a.75.75 0 011.06 1.06L4.81 7h7.44a.75.75 0 010 1.5H4.81l2.97 2.97a.75.75 0 010 1.06z"></path>
</svg>
</button>
Custom
</h1>
</header>
<header class="d-none d-sm-flex flex-items-start pt-1">
<button
class="border-0 px-2 pt-1 m-0 Link--secondary f5"
style="background-color: transparent;"
aria-label="Return to menu"
type="button"
data-action="click:notifications-list-subscription-form#closeCustomDialog"
>
<svg style="position: relative; left: 2px; top: 1px" aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-arrow-left">
<path fill-rule="evenodd" d="M7.78 12.53a.75.75 0 01-1.06 0L2.47 8.28a.75.75 0 010-1.06l4.25-4.25a.75.75 0 011.06 1.06L4.81 7h7.44a.75.75 0 010 1.5H4.81l2.97 2.97a.75.75 0 010 1.06z"></path>
</svg>
</button>
<h1 class="pt-1 pr-4 pb-0 pl-0 f5 text-bold">
Custom
</h1>
</header>
<fieldset>
<legend>
<div class="text-small color-fg-muted pt-0 pr-3 pb-3 pl-6 pl-sm-5 border-bottom mb-3">
Select events you want to be notified of in addition to participating and @mentions.
</div>
</legend>
<div data-target="notifications-list-subscription-form.labelInputs">
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Issue"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Issues
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="PullRequest"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Pull requests
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Release"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Releases
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="Discussion"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Discussions
</label>
</div>
<div class="form-checkbox mr-3 ml-6 ml-sm-5 mb-2 mt-0">
<label class="f5 text-normal">
<input
type="checkbox"
name="thread_types[]"
value="SecurityAlert"
data-targets="notifications-list-subscription-form.threadTypeCheckboxes"
data-action="change:notifications-list-subscription-form#threadTypeCheckboxesUpdated"
>
Security alerts
</label>
</div>
</fieldset>
<div class="pt-2 pb-3 px-3 d-flex flex-justify-start flex-row-reverse">
<button name="do" value="custom" data-target="notifications-list-subscription-form.customSubmit" disabled="disabled" type="submit" data-view-component="true" class="btn-primary btn-sm btn ml-2"> Apply
</button>
<button data-action="click:notifications-list-subscription-form#resetForm" data-close-dialog="" type="button" data-view-component="true" class="btn-sm btn"> Cancel
</button>
</div>
</form> </div>
</details-dialog>
<div class="notifications-component-dialog-overlay"></div>
</details>
</notifications-list-subscription-form>
</li>
<li>
<div class="float-left">
<details class="details-reset details-overlay details-overlay-dark " >
<summary data-hydro-click="{"event_type":"repository.click","payload":{"target":"FORK_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="c49bf1806f0093441a79d5711b30db1b6147455850db4aa1856076f5710bb39f" data-ga-click="Repository, show fork modal, action:blob#show; text:Fork" aria-label="Fork your own copy of HarshCasper/Rotten-Scripts to your account" data-view-component="true" class="btn-sm btn"> <svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-repo-forked mr-2">
<path fill-rule="evenodd" d="M5 3.25a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm0 2.122a2.25 2.25 0 10-1.5 0v.878A2.25 2.25 0 005.75 8.5h1.5v2.128a2.251 2.251 0 101.5 0V8.5h1.5a2.25 2.25 0 002.25-2.25v-.878a2.25 2.25 0 10-1.5 0v.878a.75.75 0 01-.75.75h-4.5A.75.75 0 015 6.25v-.878zm3.75 7.378a.75.75 0 11-1.5 0 .75.75 0 011.5 0zm3-8.75a.75.75 0 100-1.5.75.75 0 000 1.5z"></path>
</svg>Fork
<span id="repo-network-counter" data-pjax-replace="true" title="397" data-view-component="true" class="Counter">397</span>
</summary>
<details-dialog
class="Box d-flex flex-column anim-fade-in fast Box--overlay "
aria-label="Fork Rotten-Scripts"
src="/HarshCasper/Rotten-Scripts/fork?fragment=1"
preload
>
<div class="Box-header">
<button class="Box-btn-octicon btn-octicon float-right" type="button" aria-label="Close dialog" data-close-dialog>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
<h1 class="Box-title">Fork Rotten-Scripts</h1>
</div>
<div class="text-center overflow-auto">
<include-fragment>
<svg aria-label="Loading..." style="box-sizing: content-box; color: var(--color-icon-primary);" width="32" height="32" viewBox="0 0 16 16" fill="none" data-view-component="true" class="my-5 anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
<p class="f5 color-fg-muted">If this dialog fails to load, you can visit <a href="/HarshCasper/Rotten-Scripts/fork">the fork page</a> directly.</p>
</include-fragment>
</div>
</details-dialog>
</details>
</div>
</li>
<li>
<template class="js-unstar-confirmation-dialog-template">
<div class="Box-header">
<h2 class="Box-title">Unstar this repository?</h2>
</div>
<div class="Box-body">
<p class="mb-3">
This will remove {{ repoNameWithOwner }} from the {{ listsWithCount }} that it's been added to.
</p>
<div class="form-actions">
<form class="js-social-confirmation-form" action="{{ confirmUrl }}" accept-charset="UTF-8" method="post">
<input type="hidden" name="authenticity_token" value="{{ confirmCsrfToken }}">
<input type="hidden" name="confirm" value="true">
<button data-close-dialog="true" type="submit" data-view-component="true" class="btn-danger btn width-full"> Unstar
</button>
</form> </div>
</div>
</template>
<div data-view-component="true" class="js-toggler-container js-social-container starring-container BtnGroup d-flex">
<form class="starred js-social-form BtnGroup-parent flex-auto js-deferred-toggler-target" action="/HarshCasper/Rotten-Scripts/unstar" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="TpmchdnzTK9T6xkiQz_8-DGznKb9DVCM6X7H2e7oZzGZNMzllKLC0dPFS9-79Oz8tNYlrFXi5f5MMCmTcLRrwA" autocomplete="off" />
<input type="hidden" value="wX6m2qs4NSKAAMm8sRLPlbzD_EKaZdk_3DF9wvqUHagW0_a65mm7XAAum0FJ2d-ROaZFSDKKbE15f5OIZMgRWQ" data-csrf="true" class="js-confirm-csrf-token" />
<input type="hidden" name="context" value="repository">
<button data-hydro-click="{"event_type":"repository.click","payload":{"target":"UNSTAR_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="b300a64a5c218fbe1bd3c051da20554b9af4cf483df56a7e7034e9c1ec23e2cc" data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar" aria-label="Unstar this repository" type="submit" data-view-component="true" class="rounded-left-2 border-right-0 btn-sm btn BtnGroup-item"> <svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-star-fill starred-button-icon d-inline-block mr-2">
<path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25z"></path>
</svg><span data-view-component="true" class="d-inline">
Starred
</span> <span id="repo-stars-counter-unstar" aria-label="944 users starred this repository" data-singular-suffix="user starred this repository" data-plural-suffix="users starred this repository" data-pjax-replace="true" title="944" data-view-component="true" class="Counter js-social-count">944</span>
</button></form>
<form class="unstarred js-social-form BtnGroup-parent flex-auto" action="/HarshCasper/Rotten-Scripts/star" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="j1QOIAXEjzjGUeIETSgXL-nXKozd5p0kUKGl_74em93yu6MHRvzuWi5UMpW3-62xcFz8WFS_c2s9ltNRBIq_1A" autocomplete="off" />
<input type="hidden" name="context" value="repository">
<button data-hydro-click="{"event_type":"repository.click","payload":{"target":"STAR_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="1d3c646cda9f21c5878135cad9b2f901473d9979c69cda3da4d600bbbb6a0de8" data-ga-click="Repository, click star button, action:blob#show; text:Star" aria-label="Star this repository" type="submit" data-view-component="true" class="js-toggler-target rounded-left-2 btn-sm btn BtnGroup-item"> <svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-star d-inline-block mr-2">
<path fill-rule="evenodd" d="M8 .25a.75.75 0 01.673.418l1.882 3.815 4.21.612a.75.75 0 01.416 1.279l-3.046 2.97.719 4.192a.75.75 0 01-1.088.791L8 12.347l-3.766 1.98a.75.75 0 01-1.088-.79l.72-4.194L.818 6.374a.75.75 0 01.416-1.28l4.21-.611L7.327.668A.75.75 0 018 .25zm0 2.445L6.615 5.5a.75.75 0 01-.564.41l-3.097.45 2.24 2.184a.75.75 0 01.216.664l-.528 3.084 2.769-1.456a.75.75 0 01.698 0l2.77 1.456-.53-3.084a.75.75 0 01.216-.664l2.24-2.183-3.096-.45a.75.75 0 01-.564-.41L8 2.694v.001z"></path>
</svg><span data-view-component="true" class="d-inline">
Star
</span> <span id="repo-stars-counter-star" aria-label="944 users starred this repository" data-singular-suffix="user starred this repository" data-plural-suffix="users starred this repository" data-pjax-replace="true" title="944" data-view-component="true" class="Counter js-social-count">944</span>
</button></form>
<details id="details-2b0d0b" data-view-component="true" class="details-reset details-overlay BtnGroup-parent js-user-list-menu d-inline-block position-relative">
<summary aria-label="Add this repository to a list" data-view-component="true" class="btn-sm btn BtnGroup-item px-2 float-none"> <svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-triangle-down">
<path d="M4.427 7.427l3.396 3.396a.25.25 0 00.354 0l3.396-3.396A.25.25 0 0011.396 7H4.604a.25.25 0 00-.177.427z"></path>
</svg>
</summary> <template class="js-user-list-create-dialog-template" data-label="Create list">
<div class="Box-header">
<h2 class="Box-title">Create list</h2>
</div>
<form class="Box-body d-flex flex-column p-3 js-user-list-form" action="/stars/keeganskeate/lists" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="MC6BHToSbeN6V8HI4Xb93xHFH9-1qk8SunwvctB9mWDnsXokZEs15i2ktfkBqbgf5r-lCYLCo4jRXxdLtrWUUw" autocomplete="off" />
<p class="color-fg-subtle mb-3">Create a list to organize your starred repositories.</p>
<input type="hidden" name="repository_id" value="{{ repositoryId }}">
<div class="form-group mx-0 mt-0 mb-2 js-user-list-input-container js-characters-remaining-container position-relative">
<auto-check src="/stars/keeganskeate/list-check?attr=name" required>
<text-expander keys=":" data-emoji-url="/autocomplete/emoji">
<input
type="text"
name="user_list[name]"
class="form-control js-user-list-input js-characters-remaining-field"
placeholder="⭐️ Name this list"
value=""
aria-label="List name"
maxlength="32"
data-maxlength="32"
autofocus
required
>
</text-expander>
<input type="hidden" value="z-pfk4RZ7fcOfW0l0kQJk_tpAW0t-jmjJSP-MIqLv3bnu_FucTNZdw-Yq2Qu1SB_hfR_ABlEkDuuOMotQduj8A" data-csrf="true" />
</auto-check>
<p
class="note error position-relative js-user-list-error"
hidden
>
Name .
</p>
<p class="mt-1 text-small float-right js-characters-remaining" data-suffix="remaining" hidden>
32 remaining
</p>
</div>
<div class="form-group mx-0 mt-0 mb-2 js-user-list-input-container js-characters-remaining-container position-relative">
<text-expander keys=":" data-emoji-url="/autocomplete/emoji">
<textarea
name="user_list[description]"
class="form-control js-user-list-input js-characters-remaining-field"
placeholder="Write a description"
aria-label="List description"
maxlength="160"
data-maxlength="160"
style="height: 74px; min-height: 74px"
></textarea>
</text-expander>
<p
class="note error position-relative js-user-list-error"
hidden
>
Description .
</p>
<p class="mt-1 text-small float-right js-characters-remaining" data-suffix="remaining" hidden>
160 remaining
</p>
</div>
<div hidden="hidden" data-generic-message="Unable to save your list at this time." data-view-component="true" class="js-user-list-base flash flash-error mx-0 mt-0 mb-2">
.
</div> <button disabled="disabled" data-disable-invalid="true" data-submitting-message="Creating..." type="submit" data-view-component="true" class="btn-primary btn btn-block mt-2"> Create
</button>
<p class="note mt-2 mb-0">
<strong>Tip:</strong> type <code>:</code> to add emoji to the name or description.
</p>
</form>
<div data-view-component="true" class="Box-footer Box-row--gray text-small color-fg-muted d-flex flex-items-baseline py-2">
<span title="Feature Release Label: Beta" aria-label="Feature Release Label: Beta" data-view-component="true" class="Label Label--success Label--inline px-2 mr-2">Beta</span>
<span class="mr-1">Lists are currently in beta.</span>
<a href="/github/feedback/discussions/categories/lists-feedback">Share feedback and report bugs.</a>
</div>
</template>
<details-menu
class="SelectMenu right-0"
src="/HarshCasper/Rotten-Scripts/lists"
role="menu"
>
<div class="SelectMenu-modal">
<button class="SelectMenu-closeButton position-absolute right-0 m-2" type="button" aria-label="Close menu" data-toggle-for="details-2b0d0b">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
<div
id="filter-menu-2b0d0b"
class="d-flex flex-column flex-1 overflow-hidden"
>
<div
class="SelectMenu-list"
>
<include-fragment class="SelectMenu-loading" aria-label="Loading">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" width="32" height="32" viewBox="0 0 16 16" fill="none" data-view-component="true" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</include-fragment>
</div>
</div>
</div>
</details-menu>
</details>
</div>
</li>
<li>
</li>
</ul>
</div>
<div id="responsive-meta-container" data-pjax-replace>
</div>
<nav data-pjax="#js-repo-pjax-container" aria-label="Repository" data-view-component="true" class="js-repo-nav js-sidenav-container-pjax js-responsive-underlinenav overflow-hidden UnderlineNav px-3 px-md-4 px-lg-5">
<ul data-view-component="true" class="UnderlineNav-body list-style-none">
<li data-view-component="true" class="d-inline-flex">
<a id="code-tab" href="/HarshCasper/Rotten-Scripts" data-tab-item="i0code-tab" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages repo_deployments /HarshCasper/Rotten-Scripts" data-pjax="#repo-content-pjax-container" data-hotkey="g c" data-ga-click="Repository, Navigation click, Code tab" aria-current="page" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item selected">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-code UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M4.72 3.22a.75.75 0 011.06 1.06L2.06 8l3.72 3.72a.75.75 0 11-1.06 1.06L.47 8.53a.75.75 0 010-1.06l4.25-4.25zm6.56 0a.75.75 0 10-1.06 1.06L13.94 8l-3.72 3.72a.75.75 0 101.06 1.06l4.25-4.25a.75.75 0 000-1.06l-4.25-4.25z"></path>
</svg>
<span data-content="Code">Code</span>
<span id="code-repo-tab-count" data-pjax-replace="" title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="issues-tab" href="/HarshCasper/Rotten-Scripts/issues" data-tab-item="i1issues-tab" data-selected-links="repo_issues repo_labels repo_milestones /HarshCasper/Rotten-Scripts/issues" data-pjax="#repo-content-pjax-container" data-hotkey="g i" data-ga-click="Repository, Navigation click, Issues tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-issue-opened UnderlineNav-octicon d-none d-sm-inline">
<path d="M8 9.5a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path><path fill-rule="evenodd" d="M8 0a8 8 0 100 16A8 8 0 008 0zM1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0z"></path>
</svg>
<span data-content="Issues">Issues</span>
<span id="issues-repo-tab-count" data-pjax-replace="" title="13" data-view-component="true" class="Counter">13</span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="pull-requests-tab" href="/HarshCasper/Rotten-Scripts/pulls" data-tab-item="i2pull-requests-tab" data-selected-links="repo_pulls checks /HarshCasper/Rotten-Scripts/pulls" data-pjax="#repo-content-pjax-container" data-hotkey="g p" data-ga-click="Repository, Navigation click, Pull requests tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-git-pull-request UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M7.177 3.073L9.573.677A.25.25 0 0110 .854v4.792a.25.25 0 01-.427.177L7.177 3.427a.25.25 0 010-.354zM3.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122v5.256a2.251 2.251 0 11-1.5 0V5.372A2.25 2.25 0 011.5 3.25zM11 2.5h-1V4h1a1 1 0 011 1v5.628a2.251 2.251 0 101.5 0V5A2.5 2.5 0 0011 2.5zm1 10.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0zM3.75 12a.75.75 0 100 1.5.75.75 0 000-1.5z"></path>
</svg>
<span data-content="Pull requests">Pull requests</span>
<span id="pull-requests-repo-tab-count" data-pjax-replace="" title="16" data-view-component="true" class="Counter">16</span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="discussions-tab" href="/HarshCasper/Rotten-Scripts/discussions" data-tab-item="i3discussions-tab" data-selected-links="repo_discussions /HarshCasper/Rotten-Scripts/discussions" data-pjax="#repo-content-pjax-container" data-hotkey="g g" data-ga-click="Repository, Navigation click, Discussions tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-comment-discussion UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M1.5 2.75a.25.25 0 01.25-.25h8.5a.25.25 0 01.25.25v5.5a.25.25 0 01-.25.25h-3.5a.75.75 0 00-.53.22L3.5 11.44V9.25a.75.75 0 00-.75-.75h-1a.25.25 0 01-.25-.25v-5.5zM1.75 1A1.75 1.75 0 000 2.75v5.5C0 9.216.784 10 1.75 10H2v1.543a1.457 1.457 0 002.487 1.03L7.061 10h3.189A1.75 1.75 0 0012 8.25v-5.5A1.75 1.75 0 0010.25 1h-8.5zM14.5 4.75a.25.25 0 00-.25-.25h-.5a.75.75 0 110-1.5h.5c.966 0 1.75.784 1.75 1.75v5.5A1.75 1.75 0 0114.25 12H14v1.543a1.457 1.457 0 01-2.487 1.03L9.22 12.28a.75.75 0 111.06-1.06l2.22 2.22v-2.19a.75.75 0 01.75-.75h1a.25.25 0 00.25-.25v-5.5z"></path>
</svg>
<span data-content="Discussions">Discussions</span>
<span id="discussions-repo-tab-count" data-pjax-replace="" title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="actions-tab" href="/HarshCasper/Rotten-Scripts/actions" data-tab-item="i4actions-tab" data-selected-links="repo_actions /HarshCasper/Rotten-Scripts/actions" data-pjax="#repo-content-pjax-container" data-hotkey="g a" data-ga-click="Repository, Navigation click, Actions tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-play UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zM6.379 5.227A.25.25 0 006 5.442v5.117a.25.25 0 00.379.214l4.264-2.559a.25.25 0 000-.428L6.379 5.227z"></path>
</svg>
<span data-content="Actions">Actions</span>
<span id="actions-repo-tab-count" data-pjax-replace="" title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="projects-tab" href="/HarshCasper/Rotten-Scripts/projects?type=beta" data-tab-item="i5projects-tab" data-selected-links="repo_projects new_repo_project repo_project /HarshCasper/Rotten-Scripts/projects?type=beta" data-pjax="#repo-content-pjax-container" data-hotkey="g b" data-ga-click="Repository, Navigation click, Projects tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-table UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M0 1.75C0 .784.784 0 1.75 0h12.5C15.216 0 16 .784 16 1.75v3.585a.746.746 0 010 .83v8.085A1.75 1.75 0 0114.25 16H6.309a.748.748 0 01-1.118 0H1.75A1.75 1.75 0 010 14.25V6.165a.746.746 0 010-.83V1.75zM1.5 6.5v7.75c0 .138.112.25.25.25H5v-8H1.5zM5 5H1.5V1.75a.25.25 0 01.25-.25H5V5zm1.5 1.5v8h7.75a.25.25 0 00.25-.25V6.5h-8zm8-1.5h-8V1.5h7.75a.25.25 0 01.25.25V5z"></path>
</svg>
<span data-content="Projects">Projects</span>
<span id="projects-repo-tab-count" data-pjax-replace="" title="0" hidden="hidden" data-view-component="true" class="Counter">0</span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="wiki-tab" href="/HarshCasper/Rotten-Scripts/wiki" data-tab-item="i6wiki-tab" data-selected-links="repo_wiki /HarshCasper/Rotten-Scripts/wiki" data-pjax="#repo-content-pjax-container" data-hotkey="g w" data-ga-click="Repository, Navigation click, Wikis tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-book UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M0 1.75A.75.75 0 01.75 1h4.253c1.227 0 2.317.59 3 1.501A3.744 3.744 0 0111.006 1h4.245a.75.75 0 01.75.75v10.5a.75.75 0 01-.75.75h-4.507a2.25 2.25 0 00-1.591.659l-.622.621a.75.75 0 01-1.06 0l-.622-.621A2.25 2.25 0 005.258 13H.75a.75.75 0 01-.75-.75V1.75zm8.755 3a2.25 2.25 0 012.25-2.25H14.5v9h-3.757c-.71 0-1.4.201-1.992.572l.004-7.322zm-1.504 7.324l.004-5.073-.002-2.253A2.25 2.25 0 005.003 2.5H1.5v9h3.757a3.75 3.75 0 011.994.574z"></path>
</svg>
<span data-content="Wiki">Wiki</span>
<span id="wiki-repo-tab-count" data-pjax-replace="" title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="security-tab" href="/HarshCasper/Rotten-Scripts/security" data-tab-item="i7security-tab" data-selected-links="security overview alerts policy token_scanning code_scanning /HarshCasper/Rotten-Scripts/security" data-pjax="#repo-content-pjax-container" data-hotkey="g s" data-ga-click="Repository, Navigation click, Security tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-shield UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M7.467.133a1.75 1.75 0 011.066 0l5.25 1.68A1.75 1.75 0 0115 3.48V7c0 1.566-.32 3.182-1.303 4.682-.983 1.498-2.585 2.813-5.032 3.855a1.7 1.7 0 01-1.33 0c-2.447-1.042-4.049-2.357-5.032-3.855C1.32 10.182 1 8.566 1 7V3.48a1.75 1.75 0 011.217-1.667l5.25-1.68zm.61 1.429a.25.25 0 00-.153 0l-5.25 1.68a.25.25 0 00-.174.238V7c0 1.358.275 2.666 1.057 3.86.784 1.194 2.121 2.34 4.366 3.297a.2.2 0 00.154 0c2.245-.956 3.582-2.104 4.366-3.298C13.225 9.666 13.5 8.36 13.5 7V3.48a.25.25 0 00-.174-.237l-5.25-1.68zM9 10.5a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.75a.75.75 0 10-1.5 0v3a.75.75 0 001.5 0v-3z"></path>
</svg>
<span data-content="Security">Security</span>
<include-fragment src="/HarshCasper/Rotten-Scripts/security/overall-count" accept="text/fragment+html"></include-fragment>
</a></li>
<li data-view-component="true" class="d-inline-flex">
<a id="insights-tab" href="/HarshCasper/Rotten-Scripts/pulse" data-tab-item="i8insights-tab" data-selected-links="repo_graphs repo_contributors dependency_graph dependabot_updates pulse people community /HarshCasper/Rotten-Scripts/pulse" data-pjax="#repo-content-pjax-container" data-ga-click="Repository, Navigation click, Insights tab" data-view-component="true" class="UnderlineNav-item hx_underlinenav-item no-wrap js-responsive-underlinenav-item js-selected-navigation-item">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-graph UnderlineNav-octicon d-none d-sm-inline">
<path fill-rule="evenodd" d="M1.5 1.75a.75.75 0 00-1.5 0v12.5c0 .414.336.75.75.75h14.5a.75.75 0 000-1.5H1.5V1.75zm14.28 2.53a.75.75 0 00-1.06-1.06L10 7.94 7.53 5.47a.75.75 0 00-1.06 0L3.22 8.72a.75.75 0 001.06 1.06L7 7.06l2.47 2.47a.75.75 0 001.06 0l5.25-5.25z"></path>
</svg>
<span data-content="Insights">Insights</span>
<span id="insights-repo-tab-count" data-pjax-replace="" title="Not available" data-view-component="true" class="Counter"></span>
</a></li>
</ul>
<div style="visibility:hidden;" data-view-component="true" class="UnderlineNav-actions js-responsive-underlinenav-overflow position-absolute pr-3 pr-md-4 pr-lg-5 right-0"> <details data-view-component="true" class="details-overlay details-reset position-relative">
<summary role="button" data-view-component="true"> <div class="UnderlineNav-item mr-0 border-0">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
<span class="sr-only">More</span>
</div>
</summary>
<div data-view-component="true"> <details-menu role="menu" data-view-component="true" class="dropdown-menu dropdown-menu-sw">
<ul>
<li data-menu-item="i0code-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item selected dropdown-item" aria-current="page" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages repo_deployments /HarshCasper/Rotten-Scripts" href="/HarshCasper/Rotten-Scripts">
Code
</a> </li>
<li data-menu-item="i1issues-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_issues repo_labels repo_milestones /HarshCasper/Rotten-Scripts/issues" href="/HarshCasper/Rotten-Scripts/issues">
Issues
</a> </li>
<li data-menu-item="i2pull-requests-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_pulls checks /HarshCasper/Rotten-Scripts/pulls" href="/HarshCasper/Rotten-Scripts/pulls">
Pull requests
</a> </li>
<li data-menu-item="i3discussions-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_discussions /HarshCasper/Rotten-Scripts/discussions" href="/HarshCasper/Rotten-Scripts/discussions">
Discussions
</a> </li>
<li data-menu-item="i4actions-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_actions /HarshCasper/Rotten-Scripts/actions" href="/HarshCasper/Rotten-Scripts/actions">
Actions
</a> </li>
<li data-menu-item="i5projects-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_projects new_repo_project repo_project /HarshCasper/Rotten-Scripts/projects?type=beta" href="/HarshCasper/Rotten-Scripts/projects?type=beta">
Projects
</a> </li>
<li data-menu-item="i6wiki-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_wiki /HarshCasper/Rotten-Scripts/wiki" href="/HarshCasper/Rotten-Scripts/wiki">
Wiki
</a> </li>
<li data-menu-item="i7security-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="security overview alerts policy token_scanning code_scanning /HarshCasper/Rotten-Scripts/security" href="/HarshCasper/Rotten-Scripts/security">
Security
</a> </li>
<li data-menu-item="i8insights-tab" hidden>
<a role="menuitem" class="js-selected-navigation-item dropdown-item" data-selected-links="repo_graphs repo_contributors dependency_graph dependabot_updates pulse people community /HarshCasper/Rotten-Scripts/pulse" href="/HarshCasper/Rotten-Scripts/pulse">
Insights
</a> </li>
</ul>
</details-menu></div>
</details></div>
</nav>
</div>
<div class="clearfix new-discussion-timeline container-xl px-3 px-md-4 px-lg-5">
<div id="repo-content-pjax-container" class="repository-content " >
<a href="https://github.dev/" class="d-none js-github-dev-shortcut" data-hotkey=".">Open in github.dev</a>
<a href="https://github.dev/" class="d-none js-github-dev-new-tab-shortcut" data-hotkey="Shift+.,Shift+>,>" target="_blank">Open in a new github.dev tab</a>
<div>
<a class="d-none js-permalink-shortcut" data-hotkey="y" href="/HarshCasper/Rotten-Scripts/blob/00c40043521f526a2e3c203a5830f7c5cc12acb4/Python/Frames_to_Video_converter/frames_to_video_converter.py">Permalink</a>
<div class="d-flex flex-items-start flex-shrink-0 pb-3 flex-wrap flex-md-nowrap flex-justify-between flex-md-justify-start">
<div class="position-relative">
<details class="details-reset details-overlay mr-0 mb-0 " id="branch-select-menu">
<summary class="btn css-truncate"
data-hotkey="w"
title="Switch branches or tags">
<svg text="gray" aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-git-branch">
<path fill-rule="evenodd" d="M11.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122V6A2.5 2.5 0 0110 8.5H6a1 1 0 00-1 1v1.128a2.251 2.251 0 11-1.5 0V5.372a2.25 2.25 0 111.5 0v1.836A2.492 2.492 0 016 7h4a1 1 0 001-1v-.628A2.25 2.25 0 019.5 3.25zM4.25 12a.75.75 0 100 1.5.75.75 0 000-1.5zM3.5 3.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0z"></path>
</svg>
<span class="css-truncate-target" data-menu-button>master</span>
<span class="dropdown-caret"></span>
</summary>
<div class="SelectMenu">
<div class="SelectMenu-modal">
<header class="SelectMenu-header">
<span class="SelectMenu-title">Switch branches/tags</span>
<button class="SelectMenu-closeButton" type="button" data-toggle-for="branch-select-menu"><svg aria-label="Close menu" aria-hidden="false" role="img" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg></button>
</header>
<input-demux data-action="tab-container-change:input-demux#storeInput tab-container-changed:input-demux#updateInput">
<tab-container class="d-flex flex-column js-branches-tags-tabs" style="min-height: 0;">
<div class="SelectMenu-filter">
<input data-target="input-demux.source"
id="context-commitish-filter-field"
class="SelectMenu-input form-control"
aria-owns="ref-list-branches"
data-controls-ref-menu-id="ref-list-branches"
autofocus
autocomplete="off"
aria-label="Filter branches/tags"
placeholder="Filter branches/tags"
type="text"
>
</div>
<div class="SelectMenu-tabs" role="tablist" data-target="input-demux.control" >
<button class="SelectMenu-tab" type="button" role="tab" aria-selected="true">Branches</button>
<button class="SelectMenu-tab" type="button" role="tab">Tags</button>
</div>
<div role="tabpanel" id="ref-list-branches" data-filter-placeholder="Filter branches/tags" tabindex="" class="d-flex flex-column flex-auto overflow-auto">
<ref-selector
type="branch"
data-targets="input-demux.sinks"
data-action="
input-entered:ref-selector#inputEntered
tab-selected:ref-selector#tabSelected
focus-list:ref-selector#focusFirstListMember
"
query-endpoint="/HarshCasper/Rotten-Scripts/refs"
cache-key="v0:1642859802.7130518"
current-committish="bWFzdGVy"
default-branch="bWFzdGVy"
name-with-owner="SGFyc2hDYXNwZXIvUm90dGVuLVNjcmlwdHM="
prefetch-on-mouseover
>
<template data-target="ref-selector.fetchFailedTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Could not load branches</div>
</template>
<template data-target="ref-selector.noMatchTemplate">
<div class="SelectMenu-message">Nothing to show</div>
</template>
<!-- TODO: this max-height is necessary or else the branch list won't scroll. why? -->
<div data-target="ref-selector.listContainer" role="menu" class="SelectMenu-list " style="max-height: 330px" data-pjax="#repo-content-pjax-container">
<div class="SelectMenu-loading pt-3 pb-0 overflow-hidden" aria-label="Menu is loading">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" width="32" height="32" viewBox="0 0 16 16" fill="none" data-view-component="true" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</div>
</div>
<template data-target="ref-selector.itemTemplate">
<a href="https://github.com/HarshCasper/Rotten-Scripts/blob/{{ urlEncodedRefName }}/Python/Frames_to_Video_converter/frames_to_video_converter.py" class="SelectMenu-item" role="menuitemradio" rel="nofollow" aria-checked="{{ isCurrent }}" data-index="{{ index }}">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
<span class="flex-1 css-truncate css-truncate-overflow {{ isFilteringClass }}">{{ refName }}</span>
<span hidden="{{ isNotDefault }}" class="Label Label--secondary flex-self-start">default</span>
</a>
</template>
<footer class="SelectMenu-footer"><a href="/HarshCasper/Rotten-Scripts/branches">View all branches</a></footer>
</ref-selector>
</div>
<div role="tabpanel" id="tags-menu" data-filter-placeholder="Find a tag" tabindex="" hidden class="d-flex flex-column flex-auto overflow-auto">
<ref-selector
type="tag"
data-action="
input-entered:ref-selector#inputEntered
tab-selected:ref-selector#tabSelected
focus-list:ref-selector#focusFirstListMember
"
data-targets="input-demux.sinks"
query-endpoint="/HarshCasper/Rotten-Scripts/refs"
cache-key="v0:1642859802.7130518"
current-committish="bWFzdGVy"
default-branch="bWFzdGVy"
name-with-owner="SGFyc2hDYXNwZXIvUm90dGVuLVNjcmlwdHM="
>
<template data-target="ref-selector.fetchFailedTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Could not load tags</div>
</template>
<template data-target="ref-selector.noMatchTemplate">
<div class="SelectMenu-message" data-index="{{ index }}">Nothing to show</div>
</template>
<template data-target="ref-selector.itemTemplate">
<a href="https://github.com/HarshCasper/Rotten-Scripts/blob/{{ urlEncodedRefName }}/Python/Frames_to_Video_converter/frames_to_video_converter.py" class="SelectMenu-item" role="menuitemradio" rel="nofollow" aria-checked="{{ isCurrent }}" data-index="{{ index }}">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
<span class="flex-1 css-truncate css-truncate-overflow {{ isFilteringClass }}">{{ refName }}</span>
<span hidden="{{ isNotDefault }}" class="Label Label--secondary flex-self-start">default</span>
</a>
</template>
<div data-target="ref-selector.listContainer" role="menu" class="SelectMenu-list" style="max-height: 330px" data-pjax="#repo-content-pjax-container">
<div class="SelectMenu-loading pt-3 pb-0 overflow-hidden" aria-label="Menu is loading">
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" width="32" height="32" viewBox="0 0 16 16" fill="none" data-view-component="true" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</div>
</div>
<footer class="SelectMenu-footer"><a href="/HarshCasper/Rotten-Scripts/tags">View all tags</a></footer>
</ref-selector>
</div>
</tab-container>
</input-demux>
</div>
</div>
</details>
</div>
<h2 id="blob-path" class="breadcrumb flex-auto flex-self-center min-width-0 text-normal mx-2 width-full width-md-auto flex-order-1 flex-md-order-none mt-3 mt-md-0">
<span class="js-repo-root text-bold"><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/HarshCasper/Rotten-Scripts"><span>Rotten-Scripts</span></a></span></span><span class="separator">/</span><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/HarshCasper/Rotten-Scripts/tree/master/Python"><span>Python</span></a></span><span class="separator">/</span><span class="js-path-segment d-inline-block wb-break-all"><a data-pjax="#repo-content-pjax-container" href="/HarshCasper/Rotten-Scripts/tree/master/Python/Frames_to_Video_converter"><span>Frames_to_Video_converter</span></a></span><span class="separator">/</span><strong class="final-path">frames_to_video_converter.py</strong>
<span class="separator">/</span><details class="details-reset details-overlay d-inline" id="jumpto-symbol-select-menu">
<summary aria-haspopup="true" data-hotkey="r" data-hydro-click="{"event_type":"code_navigation.click_on_blob_definitions","payload":{"action":"click_on_blob_definitions","repository_id":240786294,"ref":"master","language":"Python","backend":"ALEPH_FUZZY","code_nav_context":"BLOB_VIEW","retry_backend":"","originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="6d3391340b77a60fc9b14878b6163a4dd0b5466bf66eac80518f258edcc81bc8" data-view-component="true" class="Link--secondary css-truncate btn-link"> <svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-code">
<path fill-rule="evenodd" d="M4.72 3.22a.75.75 0 011.06 1.06L2.06 8l3.72 3.72a.75.75 0 11-1.06 1.06L.47 8.53a.75.75 0 010-1.06l4.25-4.25zm6.56 0a.75.75 0 10-1.06 1.06L13.94 8l-3.72 3.72a.75.75 0 101.06 1.06l4.25-4.25a.75.75 0 000-1.06l-4.25-4.25z"></path>
</svg>
<span data-menu-button>Jump to</span>
<span class="dropdown-caret"></span>
</summary> <details-menu class="SelectMenu SelectMenu--hasFilter" role="menu">
<div class="SelectMenu-modal">
<header class="SelectMenu-header">
<span class="SelectMenu-title">Code definitions</span>
<button class="SelectMenu-closeButton" type="button" data-toggle-for="jumpto-symbol-select-menu">
<svg aria-label="Close menu" role="img" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
</header>
<div class="SelectMenu-filter">
<input
class="SelectMenu-input form-control js-filterable-field"
id="jumpto-symbols-filter-field"
type="text"
autocomplete="off"
spellcheck="false"
autofocus
placeholder="Filter definitions"
aria-label="Filter definitions">
</div>
<div class="SelectMenu-list">
<div data-filterable-for="jumpto-symbols-filter-field" data-filterable-type="substring">
<a class="SelectMenu-item d-flex flex-justify-between css-truncate" role="menuitemradio" aria-checked="false" rel="nofollow" data-hydro-click="{"event_type":"code_navigation.navigate_to_blob_definition","payload":{"action":"navigate_to_blob_definition","repository_id":240786294,"ref":"master","language":"Python","backend":"ALEPH_FUZZY","code_nav_context":"BLOB_VIEW","retry_backend":"","originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="7ad203e1303f727270682c1a928259cf1468c3cc7863f4544a022e23ac2a3f57" href="/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py#L12">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check SelectMenu-icon SelectMenu-icon--check">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
<span class="flex-auto css-truncate-target" data-menu-button-text>convert_frames_to_video</span>
<span class="flex-auto d-flex flex-justify-end">Function</span>
</a> </div>
</div>
<footer class="SelectMenu-footer">
<div class="d-flex flex-justify-between">
Code navigation index up-to-date
<svg class="octicon octicon-dot-fill text-green" viewBox="0 0 16 16" version="1.1" width="16" height="16" aria-hidden="true"><path fill-rule="evenodd" d="M8 4a4 4 0 100 8 4 4 0 000-8z"></path></svg>
</div>
</footer>
</div>
</details-menu>
</details>
</h2>
<a href="/HarshCasper/Rotten-Scripts/find/master"
class="js-pjax-capture-input btn mr-2 d-none d-md-block"
data-pjax
data-hotkey="t">
Go to file
</a>
<details id="blob-more-options-details" data-view-component="true" class="details-overlay details-reset position-relative">
<summary role="button" data-view-component="true" class="btn"> <svg aria-label="More options" role="img" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
</summary>
<div data-view-component="true"> <ul class="dropdown-menu dropdown-menu-sw">
<li class="d-block d-md-none">
<a class="dropdown-item d-flex flex-items-baseline" data-hydro-click="{"event_type":"repository.click","payload":{"target":"FIND_FILE_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="1167715960f0a80e85e6f4c4484166773b676a0fdb7f5352a144407a75a38b43" data-ga-click="Repository, find file, location:repo overview" data-hotkey="t" data-pjax="true" href="/HarshCasper/Rotten-Scripts/find/master">
<span class="flex-auto">Go to file</span>
<span class="text-small color-fg-muted" aria-hidden="true">T</span>
</a> </li>
<li data-toggle-for="blob-more-options-details">
<button data-toggle-for="jumpto-line-details-dialog" type="button" data-view-component="true" class="dropdown-item btn-link"> <span class="d-flex flex-items-baseline">
<span class="flex-auto">Go to line</span>
<span class="text-small color-fg-muted" aria-hidden="true">L</span>
</span>
</button> </li>
<li data-toggle-for="blob-more-options-details">
<button data-toggle-for="jumpto-symbol-select-menu" type="button" data-view-component="true" class="dropdown-item btn-link"> <span class="d-flex flex-items-baseline">
<span class="flex-auto">Go to definition</span>
<span class="text-small color-fg-muted" aria-hidden="true">R</span>
</span>
</button> </li>
<li class="dropdown-divider" role="none"></li>
<li>
<clipboard-copy data-toggle-for="blob-more-options-details" aria-label="Copy path" value="Python/Frames_to_Video_converter/frames_to_video_converter.py" data-view-component="true" class="dropdown-item cursor-pointer">
Copy path
</clipboard-copy> </li>
<li>
<clipboard-copy data-toggle-for="blob-more-options-details" aria-label="Copy permalink" value="https://github.com/HarshCasper/Rotten-Scripts/blob/00c40043521f526a2e3c203a5830f7c5cc12acb4/Python/Frames_to_Video_converter/frames_to_video_converter.py" data-view-component="true" class="dropdown-item cursor-pointer">
<span class="d-flex flex-items-baseline">
<span class="flex-auto">Copy permalink</span>
</span>
</clipboard-copy> </li>
</ul>
</div>
</details> </div>
<div id="spoof-warning" class="mt-0 pb-3" hidden aria-hidden>
<div data-view-component="true" class="flash flash-warn mt-0 clearfix">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert float-left mt-1">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
<div class="overflow-hidden">This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.</div>
</div></div>
<include-fragment src="/HarshCasper/Rotten-Scripts/spoofed_commit_check/00c40043521f526a2e3c203a5830f7c5cc12acb4" data-test-selector="spoofed-commit-check"></include-fragment>
<div class="Box d-flex flex-column flex-shrink-0 mb-3">
<div class="Box-header Details js-details-container">
<div class="d-flex flex-items-center">
<span class="flex-shrink-0 ml-n1 mr-n1 mt-n1 mb-n1">
<a rel="author" data-skip-pjax="true" data-hovercard-type="user" data-hovercard-url="/users/HarshCasper/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper"><img class="avatar avatar-user" src="https://avatars.githubusercontent.com/u/47351025?s=48&v=4" width="24" height="24" alt="@HarshCasper" /></a>
</span>
<div class="flex-1 d-flex flex-items-center ml-3 min-width-0">
<div class="css-truncate css-truncate-overflow">
<a class="text-bold Link--primary" rel="author" data-hovercard-type="user" data-hovercard-url="/users/HarshCasper/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper">HarshCasper</a>
<span class="markdown-title">
<a data-pjax="true" title="chore: formatted code using black (#1134)" class="Link--secondary" href="/HarshCasper/Rotten-Scripts/commit/aef4cb6cc32535cccfdfa44991055ed56d7b09d4">chore: formatted code using black (</a><a class="issue-link js-issue-link" data-error-text="Failed to load title" data-id="925876908" data-permission-text="Title is private" data-url="https://github.com/HarshCasper/Rotten-Scripts/issues/1134" data-hovercard-type="pull_request" data-hovercard-url="/HarshCasper/Rotten-Scripts/pull/1134/hovercard" href="https://github.com/HarshCasper/Rotten-Scripts/pull/1134">#1134</a><a data-pjax="true" title="chore: formatted code using black (#1134)" class="Link--secondary" href="/HarshCasper/Rotten-Scripts/commit/aef4cb6cc32535cccfdfa44991055ed56d7b09d4">)</a>
</span>
</div>
<span class="ml-2">
<include-fragment accept="text/fragment+html" src="/HarshCasper/Rotten-Scripts/commit/aef4cb6cc32535cccfdfa44991055ed56d7b09d4/rollup?direction=e" class="d-inline"></include-fragment>
</span>
</div>
<div class="ml-3 d-flex flex-shrink-0 flex-items-center flex-justify-end color-fg-muted no-wrap">
<span class="d-none d-md-inline">
<span>Latest commit</span>
<a class="text-small text-mono Link--secondary" href="/HarshCasper/Rotten-Scripts/commit/aef4cb6cc32535cccfdfa44991055ed56d7b09d4" data-pjax>aef4cb6</a>
<span itemprop="dateModified"><relative-time datetime="2021-06-21T06:36:54Z" class="no-wrap">Jun 21, 2021</relative-time></span>
</span>
<a data-pjax href="/HarshCasper/Rotten-Scripts/commits/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" class="ml-3 no-wrap Link--primary no-underline">
<svg text="gray" aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-history">
<path fill-rule="evenodd" d="M1.643 3.143L.427 1.927A.25.25 0 000 2.104V5.75c0 .138.112.25.25.25h3.646a.25.25 0 00.177-.427L2.715 4.215a6.5 6.5 0 11-1.18 4.458.75.75 0 10-1.493.154 8.001 8.001 0 101.6-5.684zM7.75 4a.75.75 0 01.75.75v2.992l2.028.812a.75.75 0 01-.557 1.392l-2.5-1A.75.75 0 017 8.25v-3.5A.75.75 0 017.75 4z"></path>
</svg>
<span class="d-none d-sm-inline">
<strong>History</strong>
</span>
</a>
</div>
</div>
</div>
<div class="Box-body d-flex flex-items-center flex-auto border-bottom-0 flex-wrap" >
<details class="details-reset details-overlay details-overlay-dark lh-default color-fg-default float-left mr-3" id="blob_contributors_box">
<summary class="Link--primary">
<svg text="gray" aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-people">
<path fill-rule="evenodd" d="M5.5 3.5a2 2 0 100 4 2 2 0 000-4zM2 5.5a3.5 3.5 0 115.898 2.549 5.507 5.507 0 013.034 4.084.75.75 0 11-1.482.235 4.001 4.001 0 00-7.9 0 .75.75 0 01-1.482-.236A5.507 5.507 0 013.102 8.05 3.49 3.49 0 012 5.5zM11 4a.75.75 0 100 1.5 1.5 1.5 0 01.666 2.844.75.75 0 00-.416.672v.352a.75.75 0 00.574.73c1.2.289 2.162 1.2 2.522 2.372a.75.75 0 101.434-.44 5.01 5.01 0 00-2.56-3.012A3 3 0 0011 4z"></path>
</svg>
<strong>3</strong>
contributors
</summary>
<details-dialog
class="Box Box--overlay d-flex flex-column anim-fade-in fast"
aria-label="Users who have contributed to this file"
src="/HarshCasper/Rotten-Scripts/contributors-list/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" preload>
<div class="Box-header">
<button class="Box-btn-octicon btn-octicon float-right" type="button" aria-label="Close dialog" data-close-dialog>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-x">
<path fill-rule="evenodd" d="M3.72 3.72a.75.75 0 011.06 0L8 6.94l3.22-3.22a.75.75 0 111.06 1.06L9.06 8l3.22 3.22a.75.75 0 11-1.06 1.06L8 9.06l-3.22 3.22a.75.75 0 01-1.06-1.06L6.94 8 3.72 4.78a.75.75 0 010-1.06z"></path>
</svg>
</button>
<h3 class="Box-title">
Users who have contributed to this file
</h3>
</div>
<include-fragment>
<svg style="box-sizing: content-box; color: var(--color-icon-primary);" width="32" height="32" viewBox="0 0 16 16" fill="none" data-view-component="true" class="my-3 mx-auto d-block anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</include-fragment>
</details-dialog>
</details>
<span class="">
<a class="avatar-link" data-hovercard-type="user" data-hovercard-url="/users/vybhav72954/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper/Rotten-Scripts/commits/master/Python/Frames_to_Video_converter/frames_to_video_converter.py?author=vybhav72954">
<img class="avatar mr-2 avatar-user" src="https://avatars.githubusercontent.com/u/49750343?s=48&v=4" width="24" height="24" alt="@vybhav72954" />
</a> <a class="avatar-link" data-hovercard-type="user" data-hovercard-url="/users/HeroicHitesh/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper/Rotten-Scripts/commits/master/Python/Frames_to_Video_converter/frames_to_video_converter.py?author=HeroicHitesh">
<img class="avatar mr-2 avatar-user" src="https://avatars.githubusercontent.com/u/37622734?s=48&v=4" width="24" height="24" alt="@HeroicHitesh" />
</a> <a class="avatar-link" data-hovercard-type="user" data-hovercard-url="/users/HarshCasper/hovercard" data-octo-click="hovercard-link-click" data-octo-dimensions="link_type:self" href="/HarshCasper/Rotten-Scripts/commits/master/Python/Frames_to_Video_converter/frames_to_video_converter.py?author=HarshCasper">
<img class="avatar mr-2 avatar-user" src="https://avatars.githubusercontent.com/u/47351025?s=48&v=4" width="24" height="24" alt="@HarshCasper" />
</a>
</span>
</div>
</div>
<div data-target="readme-toc.content" class="Box mt-3 position-relative">
<div
class="Box-header js-blob-header py-2 pr-2 d-flex flex-shrink-0 flex-md-row flex-items-center"
>
<div class="text-mono f6 flex-auto pr-3 flex-order-2 flex-md-order-1">
34 lines (26 sloc)
<span class="file-info-divider"></span>
1.2 KB
</div>
<div class="d-flex py-1 py-md-0 flex-auto flex-order-1 flex-md-order-2 flex-sm-grow-0 flex-justify-between hide-sm hide-md">
<div class="BtnGroup">
<a data-permalink-href="/HarshCasper/Rotten-Scripts/raw/00c40043521f526a2e3c203a5830f7c5cc12acb4/Python/Frames_to_Video_converter/frames_to_video_converter.py" href="/HarshCasper/Rotten-Scripts/raw/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" id="raw-url" data-view-component="true" class="js-permalink-replaceable-link btn-sm btn BtnGroup-item"> Raw
</a> <a data-permalink-href="/HarshCasper/Rotten-Scripts/blame/00c40043521f526a2e3c203a5830f7c5cc12acb4/Python/Frames_to_Video_converter/frames_to_video_converter.py" href="/HarshCasper/Rotten-Scripts/blame/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" data-hotkey="b" data-view-component="true" class="js-update-url-with-hash js-permalink-replaceable-link btn-sm btn BtnGroup-item"> Blame
</a> </div>
<div>
<a class="btn-octicon tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-device-desktop">
<path fill-rule="evenodd" d="M1.75 2.5h12.5a.25.25 0 01.25.25v7.5a.25.25 0 01-.25.25H1.75a.25.25 0 01-.25-.25v-7.5a.25.25 0 01.25-.25zM14.25 1H1.75A1.75 1.75 0 000 2.75v7.5C0 11.216.784 12 1.75 12h3.727c-.1 1.041-.52 1.872-1.292 2.757A.75.75 0 004.75 16h6.5a.75.75 0 00.565-1.243c-.772-.885-1.193-1.716-1.292-2.757h3.727A1.75 1.75 0 0016 10.25v-7.5A1.75 1.75 0 0014.25 1zM9.018 12H6.982a5.72 5.72 0 01-.765 2.5h3.566a5.72 5.72 0 01-.765-2.5z"></path>
</svg>
</a>
<remote-clipboard-copy class="d-inline-block btn-octicon" style="height: 26px" data-src="/HarshCasper/Rotten-Scripts/raw/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" data-action="click:remote-clipboard-copy#remoteCopy">
<span data-target="remote-clipboard-copy.idle"> <span class="tooltipped tooltipped-nw cursor-pointer" data-hydro-click="{"event_type":"repository.click","payload":{"target":"COPY_RAW_CONTENTS_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="0fcc075015d8c993dcf4b8962bd1a5fdda4f2e7dd7a924efc57c260a00324c03" aria-label="Copy raw contents">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-copy">
<path fill-rule="evenodd" d="M0 6.75C0 5.784.784 5 1.75 5h1.5a.75.75 0 010 1.5h-1.5a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-1.5a.75.75 0 011.5 0v1.5A1.75 1.75 0 019.25 16h-7.5A1.75 1.75 0 010 14.25v-7.5z"></path><path fill-rule="evenodd" d="M5 1.75C5 .784 5.784 0 6.75 0h7.5C15.216 0 16 .784 16 1.75v7.5A1.75 1.75 0 0114.25 11h-7.5A1.75 1.75 0 015 9.25v-7.5zm1.75-.25a.25.25 0 00-.25.25v7.5c0 .138.112.25.25.25h7.5a.25.25 0 00.25-.25v-7.5a.25.25 0 00-.25-.25h-7.5z"></path>
</svg>
</span></span>
<span data-target="remote-clipboard-copy.fetching" hidden="hidden"> <svg style="box-sizing: content-box; color: var(--color-icon-primary);" width="16" height="16" viewBox="0 0 16 16" fill="none" data-view-component="true" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</span>
<span data-target="remote-clipboard-copy.success" hidden="hidden"> <span class="tooltipped tooltipped-nw" aria-label="Copied!">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check color-fg-success">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
</span>
<span data-target="remote-clipboard-copy.error" hidden="hidden"> <span class="tooltipped tooltipped-nw" aria-label="Something went wrong. Try again.">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert color-fg-attention">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
</span>
</span>
</remote-clipboard-copy>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form js-update-url-with-hash" action="/HarshCasper/Rotten-Scripts/edit/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="T33Y80JLJeml3z4GplVO30ylR-nN4cI20UZnWXzPAnUV3Gj88BsLU8oNbYnseik4kpJSMZ-7Kg3NY2SAmiWrWA" />
<button class="btn-octicon tooltipped tooltipped-nw" type="submit"
aria-label="Fork this project and edit the file" data-hotkey="e" data-disable-with>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-pencil">
<path fill-rule="evenodd" d="M11.013 1.427a1.75 1.75 0 012.474 0l1.086 1.086a1.75 1.75 0 010 2.474l-8.61 8.61c-.21.21-.47.364-.756.445l-3.251.93a.75.75 0 01-.927-.928l.929-3.25a1.75 1.75 0 01.445-.758l8.61-8.61zm1.414 1.06a.25.25 0 00-.354 0L10.811 3.75l1.439 1.44 1.263-1.263a.25.25 0 000-.354l-1.086-1.086zM11.189 6.25L9.75 4.81l-6.286 6.287a.25.25 0 00-.064.108l-.558 1.953 1.953-.558a.249.249 0 00.108-.064l6.286-6.286z"></path>
</svg>
</button>
</form>
<!-- '"` --><!-- </textarea></xmp> --></option></form><form class="inline-form" action="/HarshCasper/Rotten-Scripts/delete/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" accept-charset="UTF-8" method="post"><input type="hidden" name="authenticity_token" value="_oAxPf2DOcP7iIwyVwuLeHFJxDAMfaN_1fRly0LuuGTBiU8RXg27f0oy_IKfItE86_zpqD5oguzt-F-xceP2AA" />
<button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit"
aria-label="Fork this project and delete the file" data-disable-with>
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-trash">
<path fill-rule="evenodd" d="M6.5 1.75a.25.25 0 01.25-.25h2.5a.25.25 0 01.25.25V3h-3V1.75zm4.5 0V3h2.25a.75.75 0 010 1.5H2.75a.75.75 0 010-1.5H5V1.75C5 .784 5.784 0 6.75 0h2.5C10.216 0 11 .784 11 1.75zM4.496 6.675a.75.75 0 10-1.492.15l.66 6.6A1.75 1.75 0 005.405 15h5.19c.9 0 1.652-.681 1.741-1.576l.66-6.6a.75.75 0 00-1.492-.149l-.66 6.6a.25.25 0 01-.249.225h-5.19a.25.25 0 01-.249-.225l-.66-6.6z"></path>
</svg>
</button>
</form> </div>
</div>
<div class="d-flex hide-lg hide-xl flex-order-2 flex-grow-0">
<details class="dropdown details-reset details-overlay d-inline-block">
<summary class="btn-octicon" aria-haspopup="true" aria-label="possible actions">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-kebab-horizontal">
<path d="M8 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zM1.5 9a1.5 1.5 0 100-3 1.5 1.5 0 000 3zm13 0a1.5 1.5 0 100-3 1.5 1.5 0 000 3z"></path>
</svg>
</summary>
<ul class="dropdown-menu dropdown-menu-sw" style="width: 175px">
<li>
<a class="dropdown-item tooltipped tooltipped-nw js-remove-unless-platform"
data-platforms="windows,mac"
href="https://desktop.github.com"
data-ga-click="Repository, open with desktop">
Open with Desktop
</a>
</li>
<li>
<a class="dropdown-item" href="/HarshCasper/Rotten-Scripts/raw/master/Python/Frames_to_Video_converter/frames_to_video_converter.py">
View raw
</a>
</li>
<li>
<remote-clipboard-copy class="dropdown-item" data-src="/HarshCasper/Rotten-Scripts/raw/master/Python/Frames_to_Video_converter/frames_to_video_converter.py" data-action="click:remote-clipboard-copy#remoteCopy">
<span data-target="remote-clipboard-copy.idle"> <span class="cursor-pointer" data-hydro-click="{"event_type":"repository.click","payload":{"target":"COPY_RAW_CONTENTS_BUTTON","repository_id":240786294,"originating_url":"https://github.com/HarshCasper/Rotten-Scripts/blob/master/Python/Frames_to_Video_converter/frames_to_video_converter.py","user_id":19616734}}" data-hydro-click-hmac="0fcc075015d8c993dcf4b8962bd1a5fdda4f2e7dd7a924efc57c260a00324c03">
Copy raw contents
</span></span>
<span data-target="remote-clipboard-copy.fetching" hidden="hidden"> Copy raw contents
<span class="d-inline-block position-relative" style="top: 3px">
<svg aria-label="fetching contents…" style="box-sizing: content-box; color: var(--color-icon-primary);" width="16" height="16" viewBox="0 0 16 16" fill="none" data-view-component="true" class="anim-rotate">
<circle cx="8" cy="8" r="7" stroke="currentColor" stroke-opacity="0.25" stroke-width="2" vector-effect="non-scaling-stroke" />
<path d="M15 8a7.002 7.002 0 00-7-7" stroke="currentColor" stroke-width="2" stroke-linecap="round" vector-effect="non-scaling-stroke" />
</svg>
</span>
</span>
<span data-target="remote-clipboard-copy.success" hidden="hidden"> Copy raw contents
<svg aria-label="Copied!" role="img" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-check color-fg-success">
<path fill-rule="evenodd" d="M13.78 4.22a.75.75 0 010 1.06l-7.25 7.25a.75.75 0 01-1.06 0L2.22 9.28a.75.75 0 011.06-1.06L6 10.94l6.72-6.72a.75.75 0 011.06 0z"></path>
</svg>
</span>
<span data-target="remote-clipboard-copy.error" hidden="hidden"> Copy raw contents
<svg aria-label="Something went wrong. Try again." role="img" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert color-fg-attention">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
</span>
</remote-clipboard-copy> </li>
<li>
<a class="dropdown-item" href="/HarshCasper/Rotten-Scripts/blame/master/Python/Frames_to_Video_converter/frames_to_video_converter.py">
View blame
</a>
</li>
<li class="dropdown-divider" role="none"></li>
<li>
<a class="dropdown-item" href="/HarshCasper/Rotten-Scripts/edit/master/Python/Frames_to_Video_converter/frames_to_video_converter.py">Edit file</a>
</li>
<li>
<a class="dropdown-item menu-item-danger" href="/HarshCasper/Rotten-Scripts/delete/master/Python/Frames_to_Video_converter/frames_to_video_converter.py">Delete file</a>
</li>
</ul>
</details>
</div>
</div>
<div itemprop="text" class="Box-body p-0 blob-wrapper data type-python gist-border-0">
<div class="js-check-bidi js-blob-code-container blob-code-content">
<template class="js-file-alert-template">
<div data-view-component="true" class="flash flash-warn flash-full d-flex flex-items-center">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
<span>
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
<a href="https://github.co/hiddenchars" target="_blank">Learn more about bidirectional Unicode characters</a>
</span>
<div data-view-component="true" class="flash-action"> <a href="{{ revealButtonHref }}" data-view-component="true" class="btn-sm btn"> Show hidden characters
</a>
</div>
</div></template>
<template class="js-line-alert-template">
<span aria-label="This line has hidden Unicode characters" data-view-component="true" class="line-alert tooltipped tooltipped-e">
<svg aria-hidden="true" height="16" viewBox="0 0 16 16" version="1.1" width="16" data-view-component="true" class="octicon octicon-alert">
<path fill-rule="evenodd" d="M8.22 1.754a.25.25 0 00-.44 0L1.698 13.132a.25.25 0 00.22.368h12.164a.25.25 0 00.22-.368L8.22 1.754zm-1.763-.707c.659-1.234 2.427-1.234 3.086 0l6.082 11.378A1.75 1.75 0 0114.082 15H1.918a1.75 1.75 0 01-1.543-2.575L6.457 1.047zM9 11a1 1 0 11-2 0 1 1 0 012 0zm-.25-5.25a.75.75 0 00-1.5 0v2.5a.75.75 0 001.5 0v-2.5z"></path>
</svg>
</span></template>
<table class="highlight tab-size js-file-line-container js-code-nav-container js-tagsearch-file" data-tab-size="8" data-paste-markdown-skip data-tagsearch-lang="Python" data-tagsearch-path="Python/Frames_to_Video_converter/frames_to_video_converter.py">
<tr>
<td id="L1" class="blob-num js-line-number js-code-nav-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>os</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number js-code-nav-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>sys</span></td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number js-code-nav-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line"><span class=pl-k>import</span> <span class=pl-s1>cv2</span></td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number js-code-nav-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number js-code-nav-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line"><span class=pl-c># We store all the images in the folder into a variable(here images).</span></td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number js-code-nav-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Then get the height, and width required for the frames from any image</span></td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number js-code-nav-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line"><span class=pl-c># Use VideoWriter function to initialize a video variable. The four arguments</span></td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number js-code-nav-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class=pl-c># in VideoWriter is the video file name, fourcc code, fps and dimensions</span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number js-code-nav-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line"><span class=pl-c># video.write() writes all the images into the video.</span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number js-code-nav-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number js-code-nav-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number js-code-nav-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class=pl-k>def</span> <span class=pl-en>convert_frames_to_video</span>(<span class=pl-s1>img_folder_path</span>, <span class=pl-s1>fps</span>):</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number js-code-nav-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>images</span> <span class=pl-c1>=</span> [<span class=pl-s1>img</span> <span class=pl-k>for</span> <span class=pl-s1>img</span> <span class=pl-c1>in</span> <span class=pl-s1>os</span>.<span class=pl-en>listdir</span>(<span class=pl-s1>img_folder_path</span>)]</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number js-code-nav-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>frame</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-en>imread</span>(<span class=pl-s1>os</span>.<span class=pl-s1>path</span>.<span class=pl-en>join</span>(<span class=pl-s1>img_folder_path</span>, <span class=pl-s1>images</span>[<span class=pl-c1>0</span>]))</td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number js-code-nav-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>height</span>, <span class=pl-s1>width</span>, <span class=pl-s1>layers</span> <span class=pl-c1>=</span> <span class=pl-s1>frame</span>.<span class=pl-s1>shape</span></td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number js-code-nav-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number js-code-nav-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>video_name</span> <span class=pl-c1>=</span> <span class=pl-en>input</span>(<span class=pl-s>"Enter the video name(just the filename): "</span>)</td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number js-code-nav-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-c1>not</span> <span class=pl-s1>video_name</span>.<span class=pl-en>endswith</span>(<span class=pl-s>".avi"</span>):</td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number js-code-nav-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>video_name</span> <span class=pl-c1>=</span> <span class=pl-s1>video_name</span> <span class=pl-c1>+</span> <span class=pl-s>".avi"</span></td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number js-code-nav-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code blob-code-inner js-file-line"> <span class=pl-c># fourcc code = 0 gives no warning with files other than .avi</span></td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number js-code-nav-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>video</span> <span class=pl-c1>=</span> <span class=pl-s1>cv2</span>.<span class=pl-v>VideoWriter</span>(<span class=pl-s1>video_name</span>, <span class=pl-c1>0</span>, <span class=pl-s1>fps</span>, (<span class=pl-s1>width</span>, <span class=pl-s1>height</span>))</td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number js-code-nav-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number js-code-nav-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>for</span> <span class=pl-s1>image</span> <span class=pl-c1>in</span> <span class=pl-s1>images</span>:</td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number js-code-nav-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>video</span>.<span class=pl-en>write</span>(<span class=pl-s1>cv2</span>.<span class=pl-en>imread</span>(<span class=pl-s1>os</span>.<span class=pl-s1>path</span>.<span class=pl-en>join</span>(<span class=pl-s1>img_folder_path</span>, <span class=pl-s1>image</span>)))</td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number js-code-nav-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number js-code-nav-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>cv2</span>.<span class=pl-en>destroyAllWindows</span>()</td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number js-code-nav-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>video</span>.<span class=pl-en>release</span>()</td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number js-code-nav-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number js-code-nav-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number js-code-nav-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code blob-code-inner js-file-line"><span class=pl-k>if</span> <span class=pl-s1>__name__</span> <span class=pl-c1>==</span> <span class=pl-s>"__main__"</span>:</td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number js-code-nav-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code blob-code-inner js-file-line"> <span class=pl-k>if</span> <span class=pl-en>len</span>(<span class=pl-s1>sys</span>.<span class=pl-s1>argv</span>) <span class=pl-c1>></span> <span class=pl-c1>1</span>:</td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number js-code-nav-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>img_folder_path</span> <span class=pl-c1>=</span> <span class=pl-en>str</span>(<span class=pl-s>" "</span>.<span class=pl-en>join</span>(<span class=pl-s1>sys</span>.<span class=pl-s1>argv</span>[<span class=pl-c1>1</span>:]))</td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number js-code-nav-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code blob-code-inner js-file-line"> <span class=pl-s1>fps</span> <span class=pl-c1>=</span> <span class=pl-en>int</span>(<span class=pl-en>input</span>(<span class=pl-s>"Enter the fps needed: "</span>))</td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number js-code-nav-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code blob-code-inner js-file-line"> <span class=pl-en>convert_frames_to_video</span>(<span class=pl-s1>img_folder_path</span>, <span class=pl-s1>fps</span>)</td>
</tr>
</table>
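
A minimal usage sketch for the converter above; the ./frames directory and the 24 fps value are hypothetical, and the interactive input() prompt inside convert_frames_to_video still asks for the output file name:

from frames_to_video_converter import convert_frames_to_video

# Hypothetical frame directory; file names should sort into playback order.
convert_frames_to_video("./frames", 24)

The same can be done from the shell, since the __main__ block joins all arguments into the folder path: python frames_to_video_converter.py ./frames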
| 84.60704
| 17,489
| 0.677099
|
88e639f2ce3e6beeeea2808f9fdaaea874740709
| 2,349
|
py
|
Python
|
config/settings/local.py
|
salah-hegazi/datadog-alert-system
|
2bc6fc32eb11708b0b6b3c6ea2ef9f466110ef35
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
salah-hegazi/datadog-alert-system
|
2bc6fc32eb11708b0b6b3c6ea2ef9f466110ef35
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
salah-hegazi/datadog-alert-system
|
2bc6fc32eb11708b0b6b3c6ea2ef9f466110ef35
|
[
"MIT"
] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="CQUvFK4F5nasn4B1o2ZiB5JWEwnJp9Nvks9ziZ4A7n9Ay600m3hrK2z3YGIP6cbm",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
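# Replace the final octet of each container IP with 1 to obtain the Docker
# gateway address (the address browser requests appear to come from on the
# default bridge network), so debug-toolbar accepts them via INTERNAL_IPS.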
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
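# A minimal sketch (not part of the original settings) of how the env() helper
# used above resolves values, assuming django-environ semantics: a set
# environment variable wins, otherwise the supplied default is returned.
if False:  # illustrative only, never executed
    import os
    os.environ["DJANGO_EMAIL_BACKEND"] = "django.core.mail.backends.smtp.EmailBackend"
    # The variable is set, so env() returns it:
    assert env("DJANGO_EMAIL_BACKEND", default="unused") == "django.core.mail.backends.smtp.EmailBackend"
    del os.environ["DJANGO_EMAIL_BACKEND"]
    # The variable is unset, so the default applies:
    assert env("DJANGO_EMAIL_BACKEND", default="fallback") == "fallback"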
| 39.813559
| 97
| 0.58493
|
6362963ee246041e4403414c6e271219ef5c61d1
| 4,462
|
py
|
Python
|
cooperative_action_control/Multi-agents_cooperation/runner.py
|
gingkg/man-machine_counteraction
|
ca61aeee046c1fcf11adba4f7c782a5f71d7de2e
|
[
"MIT"
] | null | null | null |
cooperative_action_control/Multi-agents_cooperation/runner.py
|
gingkg/man-machine_counteraction
|
ca61aeee046c1fcf11adba4f7c782a5f71d7de2e
|
[
"MIT"
] | null | null | null |
cooperative_action_control/Multi-agents_cooperation/runner.py
|
gingkg/man-machine_counteraction
|
ca61aeee046c1fcf11adba4f7c782a5f71d7de2e
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from common.rollout import RolloutWorker, CommRolloutWorker
from agent.agent import Agents, CommAgents
from common.replay_buffer import ReplayBuffer
import matplotlib.pyplot as plt
class Runner:
def __init__(self, env, args):
self.env = env
if args.alg.find('commnet') > -1 or args.alg.find('g2anet') > -1: # communication agent
self.agents = CommAgents(args)
self.rolloutWorker = CommRolloutWorker(env, self.agents, args)
else: # no communication agent
self.agents = Agents(args)
self.rolloutWorker = RolloutWorker(env, self.agents, args)
if not args.evaluate and args.alg.find('coma') == -1 and args.alg.find('central_v') == -1 and args.alg.find('reinforce') == -1:  # these 3 algorithms are on-policy
self.buffer = ReplayBuffer(args)
self.args = args
self.win_rates = []
self.episode_rewards = []
# Directory used to save the plt figures and pkl results
self.save_path = self.args.result_dir + '/' + args.alg + '/' + args.map
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self, num):
time_steps, train_steps, evaluate_steps = 0, 0, -1
while time_steps < self.args.n_steps:
print('Run {}, time_steps {}'.format(num, time_steps))
if time_steps // self.args.evaluate_cycle > evaluate_steps:
win_rate, episode_reward = self.evaluate()
# print('win_rate is ', win_rate)
self.win_rates.append(win_rate)
self.episode_rewards.append(episode_reward)
self.plt(num)
evaluate_steps += 1
episodes = []
# Collect self.args.n_episodes episodes
for episode_idx in range(self.args.n_episodes):
episode, _, _, steps = self.rolloutWorker.generate_episode(episode_idx)
episodes.append(episode)
time_steps += steps
# print(_)
# Each entry of an episode is a 4-D array of shape (1, episode_len, n_agents, feature_dim); below, the corresponding entries of all episodes are concatenated together
episode_batch = episodes[0]
episodes.pop(0)
for episode in episodes:
for key in episode_batch.keys():
episode_batch[key] = np.concatenate((episode_batch[key], episode[key]), axis=0)
if self.args.alg.find('coma') > -1 or self.args.alg.find('central_v') > -1 or self.args.alg.find('reinforce') > -1:
self.agents.train(episode_batch, train_steps, self.rolloutWorker.epsilon)
train_steps += 1
else:
self.buffer.store_episode(episode_batch)
for train_step in range(self.args.train_steps):
mini_batch = self.buffer.sample(min(self.buffer.current_size, self.args.batch_size))
self.agents.train(mini_batch, train_steps)
train_steps += 1
win_rate, episode_reward = self.evaluate()
print('win_rate is ', win_rate)
self.win_rates.append(win_rate)
self.episode_rewards.append(episode_reward)
self.plt(num)
def evaluate(self):
win_number = 0
episode_rewards = 0
for epoch in range(self.args.evaluate_epoch):
_, episode_reward, win_tag, _ = self.rolloutWorker.generate_episode(epoch, evaluate=True)
episode_rewards += episode_reward
if win_tag:
win_number += 1
return win_number / self.args.evaluate_epoch, episode_rewards / self.args.evaluate_epoch
def plt(self, num):
plt.figure()
plt.ylim([0, 105])
plt.cla()
# Adjust the vertical spacing between the subplots
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.subplot(2, 1, 1)
plt.plot(range(len(self.win_rates)), self.win_rates)
plt.xlabel('step*{}'.format(self.args.evaluate_cycle))
plt.ylabel('win_rates')
plt.subplot(2, 1, 2)
plt.plot(range(len(self.episode_rewards)), self.episode_rewards)
plt.xlabel('step*{}'.format(self.args.evaluate_cycle))
plt.ylabel('episode_rewards')
plt.savefig(self.save_path + '/plt_{}.png'.format(num), format='png')
np.save(self.save_path + '/win_rates_{}'.format(num), self.win_rates)
np.save(self.save_path + '/episode_rewards_{}'.format(num), self.episode_rewards)
plt.close()
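# A minimal usage sketch (illustrative only; `make_env` and `get_args` are
# hypothetical stand-ins for however the surrounding project constructs the
# environment and the argument namespace):
#
#     args = get_args()
#     env = make_env(args.map)
#     runner = Runner(env, args)
#     for run_id in range(5):
#         runner.run(run_id)  # trains, evaluating every args.evaluate_cycle steps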
| 43.320388
| 170
| 0.609144
|
4f9fc2fbe5160df24cf35bfb4333b03177e504b5
| 2,999
|
py
|
Python
|
tests/test_Personal_Fitness.py
|
JI511/Personal_Fitness
|
25d54908398caf9291e70069dca97a567b1bd94b
|
[
"MIT"
] | null | null | null |
tests/test_Personal_Fitness.py
|
JI511/Personal_Fitness
|
25d54908398caf9291e70069dca97a567b1bd94b
|
[
"MIT"
] | 75
|
2019-04-01T01:57:34.000Z
|
2019-09-26T00:14:28.000Z
|
tests/test_Personal_Fitness.py
|
JI511/Personal_Fitness
|
25d54908398caf9291e70069dca97a567b1bd94b
|
[
"MIT"
] | 1
|
2019-08-11T07:25:15.000Z
|
2019-08-11T07:25:15.000Z
|
# ----------------------------------------------------------------------------------------------------------------------
# Personal Fitness unit tests
# ----------------------------------------------------------------------------------------------------------------------
# imports
import unittest
import tempfile
import os
import shutil
import datetime
from src import Personal_Fitness
class TestBodyWeightProcedure(unittest.TestCase):
"""
Class for testing the body weight procedure.
"""
def setUp(self):
"""
Initializes unit test variables.
"""
self.logs_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.logs_dir, 'backup_db'))
self.database_name = 'test_database.db'
self.application = Personal_Fitness.PersonalFitness(
database_path=os.path.join(self.logs_dir, self.database_name),
log_name='test_log.log',
backup_path=self.logs_dir,
config_path=self.logs_dir
)
self.input_values = []
def mock_input(_):
"""
Fake input function in order to test input calls in unit tests.
"""
return self.input_values.pop(0)
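# Monkey-patch the module-level input() so that menu prompts inside
# Personal_Fitness consume self.input_values instead of blocking on stdin.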
Personal_Fitness.input = mock_input
def tearDown(self):
"""
Performs any clean up needed.
"""
self.application.connection = None
if os.path.exists(self.logs_dir):
shutil.rmtree(self.logs_dir)
# ------------------------------------------------------------------------------------------------------------------
# backup db tests
# ------------------------------------------------------------------------------------------------------------------
def test_create_backup_db_nominal(self):
"""
Creates a copy of database file and stores it in the backup_db folder.
"""
self.input_values = ['5', 'q']
self.application.run()
date = datetime.datetime.now().strftime('%m_%d')
backup_folder = os.path.join(self.logs_dir, 'backup_db')
path = os.path.join(backup_folder, '%s_%s.db' % (self.database_name[:-3], date))
self.assertTrue(os.path.exists(path))
def test_no_backup_folder(self):
"""
Creates the backup db folder in the cwd if it does not already exist.
"""
shutil.rmtree(os.path.join(self.logs_dir, 'backup_db'))
self.input_values = ['5', 'q']
self.application.run()
date = datetime.datetime.now().strftime('%m_%d')
backup_folder = os.path.join(self.logs_dir, 'backup_db')
path = os.path.join(backup_folder, '%s_%s.db' % (self.database_name[:-3], date))
self.assertTrue(os.path.exists(path))
# ----------------------------------------------------------------------------------------------------------------------
# End
# ----------------------------------------------------------------------------------------------------------------------
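# The suite can be run with the standard library runner, e.g.:
#     python -m unittest tests.test_Personal_Fitness -v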
| 38.448718
| 120
| 0.463488
|
1042ffe1734b5923d92fecbf1d03947c9b6f4e7c
| 1,291
|
py
|
Python
|
Geometry/CaloEventSetup/test/aliFile2Screen_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Geometry/CaloEventSetup/test/aliFile2Screen_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Geometry/CaloEventSetup/test/aliFile2Screen_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("read")
process.load('CondCore.CondDB.CondDB_cfi')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.CondDB.connect = cms.string('sqlite_file:myfile.db')
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDB,
toGet = cms.VPSet(
cms.PSet(
record = cms.string('EBAlignmentRcd'),
tag = cms.string('EB')
),
cms.PSet(
record = cms.string('EEAlignmentRcd'),
tag = cms.string('EE')
),
cms.PSet(
record = cms.string('ESAlignmentRcd'),
tag = cms.string('ES')
)
)
)
##
## Please, rebuild the test with debug enabled
## USER_CXXFLAGS="-g -D=EDM_ML_DEBUG" scram b -v # for bash
## env USER_CXXFLAGS="-g -D=EDM_ML_DEBUG" scram b -v # for tcsh
##
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.cerr.noTimeStamps = cms.untracked.bool(True)
process.MessageLogger.debugModules = cms.untracked.vstring('CaloAlignmentRcdRead')
process.CaloAlignmentRcdRead = cms.EDAnalyzer("CaloAlignmentRcdRead")
process.p = cms.Path(process.CaloAlignmentRcdRead)
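##
## This configuration is executed with the standard CMSSW driver, e.g.:
##   cmsRun aliFile2Screen_cfg.py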
| 29.340909
| 82
| 0.699458
|
93b7738e65a02ff891827c02c3bf74664d3a2bf5
| 106,972
|
py
|
Python
|
tccli/services/cvm/cvm_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cvm/cvm_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cvm/cvm_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.cvm.v20170312 import cvm_client as cvm_client_v20170312
from tencentcloud.cvm.v20170312 import models as models_v20170312
from tccli.services.cvm import v20170312
from tccli.services.cvm.v20170312 import help as v20170312_help
def doDescribeImageQuota(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeImageQuota", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeImageQuotaRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeImageQuota(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
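# Every do<Action> handler in this module repeats the same request flow; a
# condensed sketch of that shared skeleton (the helper name `_invoke` is
# hypothetical and not part of tccli):
#
#     def _invoke(action, param, g_param):
#         cred = credential.Credential(g_param[OptionsDefine.SecretId],
#                                      g_param[OptionsDefine.SecretKey])
#         profile = ClientProfile(
#             httpProfile=HttpProfile(endpoint=g_param[OptionsDefine.Endpoint]),
#             signMethod="HmacSHA256")
#         client = CLIENT_MAP[g_param[OptionsDefine.Version]].CvmClient(
#             cred, g_param[OptionsDefine.Region], profile)
#         model = getattr(MODELS_MAP[g_param[OptionsDefine.Version]],
#                         action + "Request")()
#         model.from_json_string(json.dumps(param))
#         return getattr(client, action)(model).to_json_string()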
def doStopInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("StopInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
"StopType": argv.get("--StopType"),
"StoppedMode": argv.get("--StoppedMode"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StopInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.StopInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstancesStatus(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstancesStatus", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstancesStatusRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstancesStatus(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyImageSharePermission(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyImageSharePermission", g_param[OptionsDefine.Version])
return
param = {
"ImageId": argv.get("--ImageId"),
"AccountIds": Utils.try_to_json(argv, "--AccountIds"),
"Permission": argv.get("--Permission"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyImageSharePermissionRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyImageSharePermission(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeImageSharePermission(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeImageSharePermission", g_param[OptionsDefine.Version])
return
param = {
"ImageId": argv.get("--ImageId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeImageSharePermissionRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeImageSharePermission(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceModifyInstancesChargeType(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceModifyInstancesChargeType", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceChargeType": argv.get("--InstanceChargeType"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceModifyInstancesChargeTypeRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceModifyInstancesChargeType(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyHostsAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyHostsAttribute", g_param[OptionsDefine.Version])
return
param = {
"HostIds": Utils.try_to_json(argv, "--HostIds"),
"HostName": argv.get("--HostName"),
"RenewFlag": argv.get("--RenewFlag"),
"ProjectId": Utils.try_to_json(argv, "--ProjectId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyHostsAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyHostsAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeImages(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeImages", g_param[OptionsDefine.Version])
return
param = {
"ImageIds": Utils.try_to_json(argv, "--ImageIds"),
"Filters": Utils.try_to_json(argv, "--Filters"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"InstanceType": argv.get("--InstanceType"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeImagesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeImages(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyKeyPairAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyKeyPairAttribute", g_param[OptionsDefine.Version])
return
param = {
"KeyId": argv.get("--KeyId"),
"KeyName": argv.get("--KeyName"),
"Description": argv.get("--Description"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyKeyPairAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyKeyPairAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneInstanceConfigInfos(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneInstanceConfigInfos", g_param[OptionsDefine.Version])
return
param = {
"Filters": Utils.try_to_json(argv, "--Filters"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneInstanceConfigInfosRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneInstanceConfigInfos(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstancesAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyInstancesAttribute", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceName": argv.get("--InstanceName"),
"SecurityGroups": Utils.try_to_json(argv, "--SecurityGroups"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyInstancesAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyInstancesAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRegions(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeRegions", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRegionsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeRegions(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceResetInstancesInternetMaxBandwidth(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceResetInstancesInternetMaxBandwidth", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InternetAccessible": Utils.try_to_json(argv, "--InternetAccessible"),
"StartTime": argv.get("--StartTime"),
"EndTime": argv.get("--EndTime"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceResetInstancesInternetMaxBandwidthRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceResetInstancesInternetMaxBandwidth(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDisassociateInstancesKeyPairs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DisassociateInstancesKeyPairs", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"KeyIds": Utils.try_to_json(argv, "--KeyIds"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DisassociateInstancesKeyPairsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DisassociateInstancesKeyPairs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateKeyPair(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("CreateKeyPair", g_param[OptionsDefine.Version])
return
param = {
"KeyName": argv.get("--KeyName"),
"ProjectId": Utils.try_to_json(argv, "--ProjectId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateKeyPairRequest()
model.from_json_string(json.dumps(param))
rsp = client.CreateKeyPair(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteKeyPairs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DeleteKeyPairs", g_param[OptionsDefine.Version])
return
param = {
"KeyIds": Utils.try_to_json(argv, "--KeyIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteKeyPairsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DeleteKeyPairs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateDisasterRecoverGroup(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("CreateDisasterRecoverGroup", g_param[OptionsDefine.Version])
return
param = {
"Name": argv.get("--Name"),
"Type": argv.get("--Type"),
"ClientToken": argv.get("--ClientToken"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateDisasterRecoverGroupRequest()
model.from_json_string(json.dumps(param))
rsp = client.CreateDisasterRecoverGroup(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"Filters": Utils.try_to_json(argv, "--Filters"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doImportKeyPair(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ImportKeyPair", g_param[OptionsDefine.Version])
return
param = {
"KeyName": argv.get("--KeyName"),
"ProjectId": Utils.try_to_json(argv, "--ProjectId"),
"PublicKey": argv.get("--PublicKey"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ImportKeyPairRequest()
model.from_json_string(json.dumps(param))
rsp = client.ImportKeyPair(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doSyncImages(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("SyncImages", g_param[OptionsDefine.Version])
return
param = {
"ImageIds": Utils.try_to_json(argv, "--ImageIds"),
"DestinationRegions": Utils.try_to_json(argv, "--DestinationRegions"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.SyncImagesRequest()
model.from_json_string(json.dumps(param))
rsp = client.SyncImages(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstanceInternetBandwidthConfigs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstanceInternetBandwidthConfigs", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstanceInternetBandwidthConfigsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstanceInternetBandwidthConfigs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAssociateInstancesKeyPairs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("AssociateInstancesKeyPairs", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"KeyIds": Utils.try_to_json(argv, "--KeyIds"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AssociateInstancesKeyPairsRequest()
model.from_json_string(json.dumps(param))
rsp = client.AssociateInstancesKeyPairs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRunInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("RunInstances", g_param[OptionsDefine.Version])
return
param = {
"Placement": Utils.try_to_json(argv, "--Placement"),
"ImageId": argv.get("--ImageId"),
"InstanceChargeType": argv.get("--InstanceChargeType"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
"InstanceType": argv.get("--InstanceType"),
"SystemDisk": Utils.try_to_json(argv, "--SystemDisk"),
"DataDisks": Utils.try_to_json(argv, "--DataDisks"),
"VirtualPrivateCloud": Utils.try_to_json(argv, "--VirtualPrivateCloud"),
"InternetAccessible": Utils.try_to_json(argv, "--InternetAccessible"),
"InstanceCount": Utils.try_to_json(argv, "--InstanceCount"),
"InstanceName": argv.get("--InstanceName"),
"LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
"SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
"EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
"ClientToken": argv.get("--ClientToken"),
"HostName": argv.get("--HostName"),
"ActionTimer": Utils.try_to_json(argv, "--ActionTimer"),
"DisasterRecoverGroupIds": Utils.try_to_json(argv, "--DisasterRecoverGroupIds"),
"TagSpecification": Utils.try_to_json(argv, "--TagSpecification"),
"InstanceMarketOptions": Utils.try_to_json(argv, "--InstanceMarketOptions"),
"UserData": argv.get("--UserData"),
"DryRun": Utils.try_to_json(argv, "--DryRun"),
}
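# Composite parameters (e.g. --Placement, --DataDisks, --TagSpecification)
# arrive as JSON strings and are parsed with Utils.try_to_json; scalar
# string flags are read directly with argv.get.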
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RunInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.RunInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteImages(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DeleteImages", g_param[OptionsDefine.Version])
return
param = {
"ImageIds": Utils.try_to_json(argv, "--ImageIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteImagesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DeleteImages(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceResizeInstanceDisks(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceResizeInstanceDisks", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
"DataDisks": Utils.try_to_json(argv, "--DataDisks"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceResizeInstanceDisksRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceResizeInstanceDisks(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doTerminateInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("TerminateInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.TerminateInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.TerminateInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstancesVpcAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyInstancesVpcAttribute", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"VirtualPrivateCloud": Utils.try_to_json(argv, "--VirtualPrivateCloud"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
"ReserveHostName": Utils.try_to_json(argv, "--ReserveHostName"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyInstancesVpcAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyInstancesVpcAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceResetInstance(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceResetInstance", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
"ImageId": argv.get("--ImageId"),
"SystemDisk": Utils.try_to_json(argv, "--SystemDisk"),
"LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
"EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceResetInstanceRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceResetInstance(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDisasterRecoverGroupQuota(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeDisasterRecoverGroupQuota", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDisasterRecoverGroupQuotaRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeDisasterRecoverGroupQuota(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetInstancesPassword(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ResetInstancesPassword", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"Password": argv.get("--Password"),
"UserName": argv.get("--UserName"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetInstancesPasswordRequest()
model.from_json_string(json.dumps(param))
rsp = client.ResetInstancesPassword(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstancesRenewFlag(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyInstancesRenewFlag", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"RenewFlag": argv.get("--RenewFlag"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyInstancesRenewFlagRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyInstancesRenewFlag(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doPurchaseReservedInstancesOffering(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("PurchaseReservedInstancesOffering", g_param[OptionsDefine.Version])
return
param = {
"InstanceCount": Utils.try_to_json(argv, "--InstanceCount"),
"ReservedInstancesOfferingId": argv.get("--ReservedInstancesOfferingId"),
"DryRun": Utils.try_to_json(argv, "--DryRun"),
"ClientToken": argv.get("--ClientToken"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.PurchaseReservedInstancesOfferingRequest()
model.from_json_string(json.dumps(param))
rsp = client.PurchaseReservedInstancesOffering(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResizeInstanceDisks(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ResizeInstanceDisks", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
"DataDisks": Utils.try_to_json(argv, "--DataDisks"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResizeInstanceDisksRequest()
model.from_json_string(json.dumps(param))
rsp = client.ResizeInstanceDisks(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeReservedInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeReservedInstances", g_param[OptionsDefine.Version])
return
param = {
"DryRun": Utils.try_to_json(argv, "--DryRun"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"Filters": Utils.try_to_json(argv, "--Filters"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeReservedInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeReservedInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZones(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZones", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZonesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZones(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateImage(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("CreateImage", g_param[OptionsDefine.Version])
return
param = {
"ImageName": argv.get("--ImageName"),
"InstanceId": argv.get("--InstanceId"),
"ImageDescription": argv.get("--ImageDescription"),
"ForcePoweroff": argv.get("--ForcePoweroff"),
"Sysprep": argv.get("--Sysprep"),
"DataDiskIds": Utils.try_to_json(argv, "--DataDiskIds"),
"SnapshotIds": Utils.try_to_json(argv, "--SnapshotIds"),
"DryRun": Utils.try_to_json(argv, "--DryRun"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateImageRequest()
model.from_json_string(json.dumps(param))
rsp = client.CreateImage(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAssociateSecurityGroups(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("AssociateSecurityGroups", g_param[OptionsDefine.Version])
return
param = {
"SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AssociateSecurityGroupsRequest()
model.from_json_string(json.dumps(param))
rsp = client.AssociateSecurityGroups(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetInstancesType(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ResetInstancesType", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceType": argv.get("--InstanceType"),
"ForceStop": Utils.try_to_json(argv, "--ForceStop"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetInstancesTypeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ResetInstancesType(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyImageAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyImageAttribute", g_param[OptionsDefine.Version])
return
param = {
"ImageId": argv.get("--ImageId"),
"ImageName": argv.get("--ImageName"),
"ImageDescription": argv.get("--ImageDescription"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyImageAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyImageAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstancesOperationLimit(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstancesOperationLimit", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"Operation": argv.get("--Operation"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstancesOperationLimitRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstancesOperationLimit(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceResetInstancesType(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceResetInstancesType", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceType": argv.get("--InstanceType"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceResetInstancesTypeRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceResetInstancesType(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstanceFamilyConfigs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstanceFamilyConfigs", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstanceFamilyConfigsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstanceFamilyConfigs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteDisasterRecoverGroups(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DeleteDisasterRecoverGroups", g_param[OptionsDefine.Version])
return
param = {
"DisasterRecoverGroupIds": Utils.try_to_json(argv, "--DisasterRecoverGroupIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteDisasterRecoverGroupsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DeleteDisasterRecoverGroups(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeImportImageOs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeImportImageOs", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeImportImageOsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeImportImageOs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstancesProject(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyInstancesProject", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"ProjectId": Utils.try_to_json(argv, "--ProjectId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyInstancesProjectRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyInstancesProject(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetInstance(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ResetInstance", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
"ImageId": argv.get("--ImageId"),
"SystemDisk": Utils.try_to_json(argv, "--SystemDisk"),
"LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
"EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
"HostName": argv.get("--HostName"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetInstanceRequest()
model.from_json_string(json.dumps(param))
rsp = client.ResetInstance(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceRenewInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceRenewInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
"DryRun": Utils.try_to_json(argv, "--DryRun"),
"RenewPortableDataDisk": Utils.try_to_json(argv, "--RenewPortableDataDisk"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceRenewInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceRenewInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceRunInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("InquiryPriceRunInstances", g_param[OptionsDefine.Version])
return
param = {
"Placement": Utils.try_to_json(argv, "--Placement"),
"ImageId": argv.get("--ImageId"),
"InstanceChargeType": argv.get("--InstanceChargeType"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
"InstanceType": argv.get("--InstanceType"),
"SystemDisk": Utils.try_to_json(argv, "--SystemDisk"),
"DataDisks": Utils.try_to_json(argv, "--DataDisks"),
"VirtualPrivateCloud": Utils.try_to_json(argv, "--VirtualPrivateCloud"),
"InternetAccessible": Utils.try_to_json(argv, "--InternetAccessible"),
"InstanceCount": Utils.try_to_json(argv, "--InstanceCount"),
"InstanceName": argv.get("--InstanceName"),
"LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
"SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
"EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
"ClientToken": argv.get("--ClientToken"),
"HostName": argv.get("--HostName"),
"TagSpecification": Utils.try_to_json(argv, "--TagSpecification"),
"InstanceMarketOptions": Utils.try_to_json(argv, "--InstanceMarketOptions"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceRunInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.InquiryPriceRunInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doImportImage(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ImportImage", g_param[OptionsDefine.Version])
return
param = {
"Architecture": argv.get("--Architecture"),
"OsType": argv.get("--OsType"),
"OsVersion": argv.get("--OsVersion"),
"ImageUrl": argv.get("--ImageUrl"),
"ImageName": argv.get("--ImageName"),
"ImageDescription": argv.get("--ImageDescription"),
"DryRun": Utils.try_to_json(argv, "--DryRun"),
"Force": Utils.try_to_json(argv, "--Force"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ImportImageRequest()
model.from_json_string(json.dumps(param))
rsp = client.ImportImage(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRenewInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("RenewInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
"RenewPortableDataDisk": Utils.try_to_json(argv, "--RenewPortableDataDisk"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RenewInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.RenewInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyDisasterRecoverGroupAttribute(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyDisasterRecoverGroupAttribute", g_param[OptionsDefine.Version])
return
param = {
"DisasterRecoverGroupId": argv.get("--DisasterRecoverGroupId"),
"Name": argv.get("--Name"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyDisasterRecoverGroupAttributeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyDisasterRecoverGroupAttribute(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstanceVncUrl(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstanceVncUrl", g_param[OptionsDefine.Version])
return
param = {
"InstanceId": argv.get("--InstanceId"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstanceVncUrlRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstanceVncUrl(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstancesChargeType(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ModifyInstancesChargeType", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceChargeType": argv.get("--InstanceChargeType"),
"InstanceChargePrepaid": Utils.try_to_json(argv, "--InstanceChargePrepaid"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyInstancesChargeTypeRequest()
model.from_json_string(json.dumps(param))
rsp = client.ModifyInstancesChargeType(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRenewHosts(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("RenewHosts", g_param[OptionsDefine.Version])
return
param = {
"HostIds": Utils.try_to_json(argv, "--HostIds"),
"HostChargePrepaid": Utils.try_to_json(argv, "--HostChargePrepaid"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RenewHostsRequest()
model.from_json_string(json.dumps(param))
rsp = client.RenewHosts(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDisasterRecoverGroups(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeDisasterRecoverGroups", g_param[OptionsDefine.Version])
return
param = {
"DisasterRecoverGroupIds": Utils.try_to_json(argv, "--DisasterRecoverGroupIds"),
"Name": argv.get("--Name"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDisasterRecoverGroupsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeDisasterRecoverGroups(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStartInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("StartInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.StartInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.StartInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetInstancesInternetMaxBandwidth(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ResetInstancesInternetMaxBandwidth", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InternetAccessible": Utils.try_to_json(argv, "--InternetAccessible"),
"StartTime": argv.get("--StartTime"),
"EndTime": argv.get("--EndTime"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetInstancesInternetMaxBandwidthRequest()
model.from_json_string(json.dumps(param))
rsp = client.ResetInstancesInternetMaxBandwidth(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeKeyPairs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeKeyPairs", g_param[OptionsDefine.Version])
return
param = {
"KeyIds": Utils.try_to_json(argv, "--KeyIds"),
"Filters": Utils.try_to_json(argv, "--Filters"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeKeyPairsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeKeyPairs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeReservedInstancesOfferings(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeReservedInstancesOfferings", g_param[OptionsDefine.Version])
return
param = {
"DryRun": Utils.try_to_json(argv, "--DryRun"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"MaxDuration": Utils.try_to_json(argv, "--MaxDuration"),
"MinDuration": Utils.try_to_json(argv, "--MinDuration"),
"Filters": Utils.try_to_json(argv, "--Filters"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeReservedInstancesOfferingsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeReservedInstancesOfferings(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeHosts(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeHosts", g_param[OptionsDefine.Version])
return
param = {
"Filters": Utils.try_to_json(argv, "--Filters"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeHostsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeHosts(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAllocateHosts(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("AllocateHosts", g_param[OptionsDefine.Version])
return
param = {
"Placement": Utils.try_to_json(argv, "--Placement"),
"ClientToken": argv.get("--ClientToken"),
"HostChargePrepaid": Utils.try_to_json(argv, "--HostChargePrepaid"),
"HostChargeType": argv.get("--HostChargeType"),
"HostType": argv.get("--HostType"),
"HostCount": Utils.try_to_json(argv, "--HostCount"),
"TagSpecification": Utils.try_to_json(argv, "--TagSpecification"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AllocateHostsRequest()
model.from_json_string(json.dumps(param))
rsp = client.AllocateHosts(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInternetChargeTypeConfigs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInternetChargeTypeConfigs", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInternetChargeTypeConfigsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInternetChargeTypeConfigs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRebootInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("RebootInstances", g_param[OptionsDefine.Version])
return
param = {
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"ForceReboot": Utils.try_to_json(argv, "--ForceReboot"),
"StopType": argv.get("--StopType"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RebootInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.RebootInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInstanceTypeConfigs(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeInstanceTypeConfigs", g_param[OptionsDefine.Version])
return
param = {
"Filters": Utils.try_to_json(argv, "--Filters"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeInstanceTypeConfigsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeInstanceTypeConfigs(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDisassociateSecurityGroups(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DisassociateSecurityGroups", g_param[OptionsDefine.Version])
return
param = {
"SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CvmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DisassociateSecurityGroupsRequest()
model.from_json_string(json.dumps(param))
rsp = client.DisassociateSecurityGroups(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20170312": cvm_client_v20170312,
}
MODELS_MAP = {
"v20170312": models_v20170312,
}
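# CLIENT_MAP and MODELS_MAP are keyed by the normalized API version string
# ("v" + the dated version with dashes removed), so supporting a new wire
# version only requires registering its generated client and model modules here.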
ACTION_MAP = {
"DescribeImageQuota": doDescribeImageQuota,
"StopInstances": doStopInstances,
"DescribeInstancesStatus": doDescribeInstancesStatus,
"ModifyImageSharePermission": doModifyImageSharePermission,
"DescribeImageSharePermission": doDescribeImageSharePermission,
"InquiryPriceModifyInstancesChargeType": doInquiryPriceModifyInstancesChargeType,
"ModifyHostsAttribute": doModifyHostsAttribute,
"DescribeImages": doDescribeImages,
"ModifyKeyPairAttribute": doModifyKeyPairAttribute,
"DescribeZoneInstanceConfigInfos": doDescribeZoneInstanceConfigInfos,
"ModifyInstancesAttribute": doModifyInstancesAttribute,
"DescribeRegions": doDescribeRegions,
"InquiryPriceResetInstancesInternetMaxBandwidth": doInquiryPriceResetInstancesInternetMaxBandwidth,
"DisassociateInstancesKeyPairs": doDisassociateInstancesKeyPairs,
"CreateKeyPair": doCreateKeyPair,
"DeleteKeyPairs": doDeleteKeyPairs,
"CreateDisasterRecoverGroup": doCreateDisasterRecoverGroup,
"DescribeInstances": doDescribeInstances,
"ImportKeyPair": doImportKeyPair,
"SyncImages": doSyncImages,
"DescribeInstanceInternetBandwidthConfigs": doDescribeInstanceInternetBandwidthConfigs,
"AssociateInstancesKeyPairs": doAssociateInstancesKeyPairs,
"RunInstances": doRunInstances,
"DeleteImages": doDeleteImages,
"InquiryPriceResizeInstanceDisks": doInquiryPriceResizeInstanceDisks,
"TerminateInstances": doTerminateInstances,
"ModifyInstancesVpcAttribute": doModifyInstancesVpcAttribute,
"InquiryPriceResetInstance": doInquiryPriceResetInstance,
"DescribeDisasterRecoverGroupQuota": doDescribeDisasterRecoverGroupQuota,
"ResetInstancesPassword": doResetInstancesPassword,
"ModifyInstancesRenewFlag": doModifyInstancesRenewFlag,
"PurchaseReservedInstancesOffering": doPurchaseReservedInstancesOffering,
"ResizeInstanceDisks": doResizeInstanceDisks,
"DescribeReservedInstances": doDescribeReservedInstances,
"DescribeZones": doDescribeZones,
"CreateImage": doCreateImage,
"AssociateSecurityGroups": doAssociateSecurityGroups,
"ResetInstancesType": doResetInstancesType,
"ModifyImageAttribute": doModifyImageAttribute,
"DescribeInstancesOperationLimit": doDescribeInstancesOperationLimit,
"InquiryPriceResetInstancesType": doInquiryPriceResetInstancesType,
"DescribeInstanceFamilyConfigs": doDescribeInstanceFamilyConfigs,
"DeleteDisasterRecoverGroups": doDeleteDisasterRecoverGroups,
"DescribeImportImageOs": doDescribeImportImageOs,
"ModifyInstancesProject": doModifyInstancesProject,
"ResetInstance": doResetInstance,
"InquiryPriceRenewInstances": doInquiryPriceRenewInstances,
"InquiryPriceRunInstances": doInquiryPriceRunInstances,
"ImportImage": doImportImage,
"RenewInstances": doRenewInstances,
"ModifyDisasterRecoverGroupAttribute": doModifyDisasterRecoverGroupAttribute,
"DescribeInstanceVncUrl": doDescribeInstanceVncUrl,
"ModifyInstancesChargeType": doModifyInstancesChargeType,
"RenewHosts": doRenewHosts,
"DescribeDisasterRecoverGroups": doDescribeDisasterRecoverGroups,
"StartInstances": doStartInstances,
"ResetInstancesInternetMaxBandwidth": doResetInstancesInternetMaxBandwidth,
"DescribeKeyPairs": doDescribeKeyPairs,
"DescribeReservedInstancesOfferings": doDescribeReservedInstancesOfferings,
"DescribeHosts": doDescribeHosts,
"AllocateHosts": doAllocateHosts,
"DescribeInternetChargeTypeConfigs": doDescribeInternetChargeTypeConfigs,
"RebootInstances": doRebootInstances,
"DescribeInstanceTypeConfigs": doDescribeInstanceTypeConfigs,
"DisassociateSecurityGroups": doDisassociateSecurityGroups,
}
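# ACTION_MAP routes the action name given on the command line to its handler;
# e.g. the (hypothetical) invocation
#   tccli cvm StartInstances --InstanceIds '["ins-xxxxxxxx"]'
# dispatches to doStartInstances via the NiceCommand tree built in register_arg.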
AVAILABLE_VERSION_LIST = [
v20170312.version,
]
AVAILABLE_VERSIONS = {
'v' + v20170312.version.replace('-', ''): {"help": v20170312_help.INFO,"desc": v20170312_help.DESC},
}
def cvm_action(argv, arglist):
if "help" in argv:
versions = sorted(AVAILABLE_VERSIONS.keys())
opt_v = "--" + OptionsDefine.Version
version = versions[-1]
if opt_v in argv:
version = 'v' + argv[opt_v].replace('-', '')
if version not in versions:
print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return
action_str = ""
docs = AVAILABLE_VERSIONS[version]["help"]
desc = AVAILABLE_VERSIONS[version]["desc"]
for action, info in docs.items():
action_str += " %s\n" % action
action_str += Utils.split_str(" ", info["desc"], 120)
helpstr = HelpTemplate.SERVICE % {"name": "cvm", "desc": desc, "actions": action_str}
print(helpstr)
else:
print(ErrorMsg.FEW_ARG)
def version_merge():
help_merge = {}
for v in AVAILABLE_VERSIONS:
for action in AVAILABLE_VERSIONS[v]["help"]:
if action not in help_merge:
help_merge[action] = {}
help_merge[action]["cb"] = ACTION_MAP[action]
help_merge[action]["params"] = []
for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]:
if param["name"] not in help_merge[action]["params"]:
help_merge[action]["params"].append(param["name"])
return help_merge
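# A sketch of the structure version_merge returns, for a single action (keys
# come from the generated help metadata; the values here are illustrative only):
#   {"StartInstances": {"cb": doStartInstances, "params": ["InstanceIds"]}}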
def register_arg(command):
cmd = NiceCommand("cvm", cvm_action)
command.reg_cmd(cmd)
cmd.reg_opt("help", "bool")
cmd.reg_opt(OptionsDefine.Version, "string")
help_merge = version_merge()
for actionName, action in help_merge.items():
c = NiceCommand(actionName, action["cb"])
cmd.reg_cmd(c)
c.reg_opt("help", "bool")
for param in action["params"]:
c.reg_opt("--" + param, "string")
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
c.reg_opt(stropt, "string")
def parse_global_arg(argv):
params = {}
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
if stropt in argv:
params[opt] = argv[stropt]
else:
params[opt] = None
if params[OptionsDefine.Version]:
params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
config_handle = Configure()
profile = config_handle.profile
if ("--" + OptionsDefine.Profile) in argv:
profile = argv[("--" + OptionsDefine.Profile)]
is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
config = {}
cred = {}
if is_conexist:
config = config_handle._load_json_msg(conf_path)
if is_creexist:
cred = config_handle._load_json_msg(cred_path)
if os.environ.get(OptionsDefine.ENV_SECRET_ID):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
if os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
if os.environ.get(OptionsDefine.ENV_REGION):
config[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
for param in params.keys():
if param == OptionsDefine.Version:
continue
if params[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
if param in cred:
params[param] = cred[param]
else:
raise Exception("%s is invalid" % param)
else:
if param in config:
params[param] = config[param]
elif param == OptionsDefine.Region:
raise Exception("%s is invalid" % OptionsDefine.Region)
try:
if params[OptionsDefine.Version] is None:
version = config["cvm"][OptionsDefine.Version]
params[OptionsDefine.Version] = "v" + version.replace('-', '')
if params[OptionsDefine.Endpoint] is None:
params[OptionsDefine.Endpoint] = config["cvm"][OptionsDefine.Endpoint]
except Exception as err:
raise Exception("config file:%s error, %s" % (conf_path, str(err)))
versions = sorted(AVAILABLE_VERSIONS.keys())
if params[OptionsDefine.Version] not in versions:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return params
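# Credential/region resolution order implemented above: an explicit --flag on
# the command line wins, then the OptionsDefine.ENV_* environment variables,
# then the selected profile's configure/credential files; a SecretId or
# SecretKey that is still missing raises "<name> is invalid".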
def show_help(action, version):
docs = AVAILABLE_VERSIONS[version]["help"][action]
desc = AVAILABLE_VERSIONS[version]["desc"]
docstr = ""
for param in docs["params"]:
docstr += " %s\n" % ("--" + param["name"])
docstr += Utils.split_str(" ", param["desc"], 120)
helpmsg = HelpTemplate.ACTION % {"name": action, "service": "cvm", "desc": desc, "params": docstr}
print(helpmsg)
def get_actions_info():
config = Configure()
new_version = max(AVAILABLE_VERSIONS.keys())
version = new_version
try:
profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
version = profile["cvm"]["version"]
version = "v" + version.replace('-', '')
except Exception:
pass
if version not in AVAILABLE_VERSIONS.keys():
version = new_version
return AVAILABLE_VERSIONS[version]["help"]
# =============================================================================
# File: src/image_generator.py (Python, 7,855 bytes)
# Repo: antonmattsson/diabetic_retinopathy @ 6aaa0ab3631d1cb0075d3acf2778822d62d1b532
# License: MIT, blob 45f7a9a63d5e34b9807dd95ac99a1ac570bb9715
# =============================================================================
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from sklearn.feature_extraction.image import extract_patches_2d
from keras.utils import Sequence, to_categorical
from numpy.core.defchararray import add, replace
import matplotlib.pyplot as plt
class ImageGenerator(Sequence):
    '''
    Class for generating image batches from the image files
    :param image_filenames: 1D numpy array (or list) of file names of the images
    :param labels: 1D numpy array with the labels corresponding to each image
    :param batch_size: integer giving the batch size to be used in training the network
    :param image_shape: tuple of two integers; all images will be resized to this shape
    '''
def __init__(self, image_filenames, labels, batch_size, image_shape):
self.image_filenames, self.labels = image_filenames, labels
self.image_shape, self.batch_size = image_shape, batch_size
def __len__(self):
return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))
# Helper function to read and preprocess images
def _read_image(self, filename):
        # skimage's resize converts uint8 input to floats already rescaled to
        # [0, 1], so no further division by 255 is needed here (dividing again
        # would squash every pixel into roughly [0, 0.004])
        image = resize(imread(filename), self.image_shape)
        return image
def __getitem__(self, idx):
batch_x = self.image_filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([self._read_image(file_name) for file_name in batch_x]),\
to_categorical(np.array(batch_y), num_classes=5)
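# Minimal usage sketch (filenames and labels are hypothetical; the generator
# plugs into the keras Sequence API, e.g. model.fit_generator in keras 2.x):
#   gen = ImageGenerator(np.array(['a.jpeg', 'b.jpeg']), np.array([0, 3]),
#                        batch_size=2, image_shape=(512, 512))
#   x, y = gen[0]  # x: (2, 512, 512, 3) float images, y: (2, 5) one-hot labels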
class PatchGenerator(ImageGenerator):
def __init__(self, image_filenames, labels, batch_size, patch_shape, n_patches):
self.image_filenames, self.labels = image_filenames, labels
self.batch_size = batch_size
self.patch_shape, self.n_patches = patch_shape, n_patches
def _read_image(self, filename):
image = imread(filename)
# Normalize pixel values between 0 and 1
image = image / 255
patches = extract_patches_2d(image, patch_size=self.patch_shape,
max_patches=self.n_patches, random_state=38)
return patches
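# Note: PatchGenerator inherits __getitem__ from ImageGenerator, but its
# _read_image returns n_patches patches per file, so a batch has shape
# (batch_size, n_patches, patch_h, patch_w, channels); the fixed random_state
# makes the sampled patch locations reproducible across epochs.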
class ArrayGenerator(Sequence):
'''
Class for generating arrays for training
:param filenames: 1D array of filenames to read from, ending with .npy
:param labels: 1D array of strings, labels
:param batch_size: integer giving the batch size to be used in training the network
'''
def __init__(self, filenames, labels, batch_size):
self.filenames = filenames
self.labels, self.batch_size = labels, batch_size
def __len__(self):
return int(np.ceil(len(self.filenames) / float(self.batch_size)))
def read_array(self, filename):
array = np.load(filename)
return array
def __getitem__(self, idx):
batch_x = self.filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([np.load(file_name) for file_name in batch_x]),\
to_categorical(np.array(batch_y), num_classes=5)
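# Design note: reading pre-processed .npy arrays instead of decoding and
# resizing JPEGs on the fly trades disk space for much cheaper __getitem__
# calls; the class assumes every array on disk already has the shape the
# network expects.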
def get_generators(n_total, batch_size, image_shape=None, type='array', zeros_left=5000):
    '''
    Construct generators for training and validation data.
    Zero-grade images are downsampled.
    :param n_total: total number of images to use (training plus validation)
    :param batch_size: batch size used in training
    :param image_shape: image size used in training (required when type='image')
    :param type: 'array' to read preprocessed .npy arrays,
        'image' to read and resize .jpeg files
    :param zeros_left: how many images of grade zero should be left in the pool;
        use a negative value to keep all the zeros
    :return: train_gen: generator of training data
        test_gen: generator of validation data
    '''
# Set the number of training samples
n_train = int(np.ceil(n_total * 0.8))
n_test = int(np.floor(n_total * 0.2))
# Read filenames from a text file listing all the images
full_filenames = np.genfromtxt('../data/train_filenames.txt', dtype=str)
# Read the labels file
full_labels = np.genfromtxt('../data/trainLabels.csv', skip_header=1, dtype=str, delimiter=',')
# Keep only labels of data that can be used in training
full_samples = replace(full_filenames, ".jpeg", "")
full_mask = np.isin(full_labels[:, 0], full_samples)
trainable_labels = np.copy(full_labels[full_mask, :])
    # Downsample the zero grade, keeping only zeros_left of them
# Randomize order
np.random.seed(1234)
np.random.shuffle(trainable_labels)
# Arrange by a stable sort (mergesort)
trainable_labels = np.copy(trainable_labels[trainable_labels[:,1].argsort(kind='mergesort')])
# Remove extra zeros
if zeros_left > 0:
_, counts = np.unique(trainable_labels[:,1], return_counts=True)
n_zeros = counts[0]
downsampled_labels = np.copy(trainable_labels[(n_zeros-zeros_left):, :])
else:
downsampled_labels = np.copy(trainable_labels)
# Randomize and choose training data
np.random.shuffle(downsampled_labels)
train_labels = downsampled_labels[:n_train, :]
#test_labels = downsampled_labels[n_train:(n_train + n_test)]
# Exclude training samples from the original data and choose test data among them
np.random.shuffle(trainable_labels)
exclusion = np.isin(trainable_labels[:, 0], train_labels[:, 0], invert=True)
valid_labels = np.copy(trainable_labels[exclusion, :])
test_labels = np.copy(valid_labels[:n_test, :])
# Print the counts of each class in test and train data
_, train_counts = np.unique(train_labels[:, 1], return_counts=True)
print("\nTrain distribution:")
print(train_counts/np.sum(train_counts))
_, test_counts = np.unique(test_labels[:, 1], return_counts=True)
print("\nTest distribution:")
print(test_counts/np.sum(test_counts))
print("\n")
if type == 'array':
# Add .npy file ending
train_filenames = add(train_labels[:, 0], np.full(shape=n_train, fill_value='.npy'))
test_filenames = add(test_labels[:, 0], np.full(shape=n_test, fill_value='.npy'))
# Add path of the data folder to the files
train_filepaths = add(np.full(shape=train_filenames.shape, fill_value='../data/arrays/'), train_filenames)
test_filepaths = add(np.full(shape=test_filenames.shape, fill_value='../data/arrays/'), test_filenames)
# Create an instance of the image generator
train_gen = ArrayGenerator(train_filepaths, train_labels[:, 1], batch_size)
test_gen = ArrayGenerator(test_filepaths, test_labels[:, 1], batch_size)
elif type == 'image':
        if image_shape is None:
            raise ValueError("image_shape must be provided when type='image'")
# Add .jpeg file ending
train_filenames = add(train_labels[:, 0], np.full(shape=n_train, fill_value='.jpeg'))
test_filenames = add(test_labels[:, 0], np.full(shape=n_test, fill_value='.jpeg'))
# Add path of the data folder to the files
train_filepaths = add(np.full(shape=train_filenames.shape, fill_value='../data/train/'), train_filenames)
test_filepaths = add(np.full(shape=test_filenames.shape, fill_value='../data/train/'), test_filenames)
# Create an instance of the image generator
train_gen = ImageGenerator(train_filepaths, train_labels[:, 1], batch_size, image_shape)
test_gen = ImageGenerator(test_filepaths, test_labels[:, 1], batch_size, image_shape)
    else:
        raise ValueError("type must be 'array' or 'image', got %r" % type)
    return train_gen, test_gen
if __name__ == "__main__":
train_gen, test_gen = get_generators(n_total=2401, batch_size=1, image_shape=(512, 512), type='array')
print((len(train_gen), len(test_gen)))
#img = train_gen[0][0][0, ::]
#print(img.shape)
#plt.figure()
#plt.imshow(img*255)
#plt.show()
# =============================================================================
# File: ooobuild/dyn/drawing/measure_properties.py (Python, 859 bytes)
# Repo: Amourspirit/ooo_uno_tmpl @ 64e0c86fd68f24794acc22d63d8d32ae05dd12b8
# License: Apache-2.0, blob 69055d4d6a23bb99f543fb5fad197e179c7b0794
# =============================================================================
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto-generated file created by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
from ...lo.drawing.measure_properties import MeasureProperties as MeasureProperties
__all__ = ['MeasureProperties']
| 33.038462
| 83
| 0.762515
|
c80cf7fd5fd698e87b413606374a2d0582a151ee
| 3,806
|
py
|
Python
|
src/models/DispRefine2D.py
|
sunshuofeng/Bi3D
|
aa4a4bf739017d0a9bf0149a6df891f3b97752cb
|
[
"BSD-Source-Code"
] | null | null | null |
src/models/DispRefine2D.py
|
sunshuofeng/Bi3D
|
aa4a4bf739017d0a9bf0149a6df891f3b97752cb
|
[
"BSD-Source-Code"
] | null | null | null |
src/models/DispRefine2D.py
|
sunshuofeng/Bi3D
|
aa4a4bf739017d0a9bf0149a6df891f3b97752cb
|
[
"BSD-Source-Code"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Xuanyi Li (xuanyili.edu@gmail.com)
# Copyright (c) 2020 NVIDIA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from bi_models.PSMNet import conv2d
from bi_models.PSMNet import conv2d_lrelu
"""
The code in this file is adapted
from https://github.com/meteorshowers/StereoNet-ActiveStereoNet
"""
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = conv2d_lrelu(inplanes, planes, 3, stride, pad, dilation)
self.conv2 = conv2d(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class DispRefineNet(nn.Module):
def __init__(self, out_planes=32):
super(DispRefineNet, self).__init__()
self.out_planes = out_planes
self.conv2d_feature = conv2d_lrelu(
in_planes=4, out_planes=self.out_planes, kernel_size=3, stride=1, pad=1, dilation=1
)
self.residual_astrous_blocks = nn.ModuleList()
astrous_list = [1, 2, 4, 8, 1, 1]
for di in astrous_list:
self.residual_astrous_blocks.append(
BasicBlock(self.out_planes, self.out_planes, stride=1, downsample=None, pad=1, dilation=di)
)
self.conv2d_out = nn.Conv2d(self.out_planes, 1, kernel_size=3, stride=1, padding=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
return
def forward(self, x):
disp = x[:, 0, :, :][:, None, :, :]
output = self.conv2d_feature(x)
for astrous_block in self.residual_astrous_blocks:
output = astrous_block(output)
output = self.conv2d_out(output) # residual disparity
output = output + disp # final disparity
return output
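# --- Illustrative usage sketch (not part of the original file) ---
# A minimal smoke test, assuming the 4-channel input implied by the
# forward() slicing above ([coarse disparity, R, G, B] along dim 1) and
# that conv2d_lrelu pads to preserve spatial size, as in PSMNet (the
# residual additions in BasicBlock already require this). Runnable only
# where the bi_models package is importable.
if __name__ == "__main__":
    net = DispRefineNet(out_planes=32)
    x = torch.randn(1, 4, 64, 64)  # one 4-channel 64x64 sample
    out = net(x)
    print(out.shape)  # expected: torch.Size([1, 1, 64, 64])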
| 33.095652
| 107
| 0.652916
|
f755629790bb8cde966f4ceb680ec5a084e18788
| 9,889
|
py
|
Python
|
custom_components/hacs/hacsbase/data.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | 1
|
2021-12-12T18:19:48.000Z
|
2021-12-12T18:19:48.000Z
|
custom_components/hacs/hacsbase/data.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | null | null | null |
custom_components/hacs/hacsbase/data.py
|
Lucstricke/integration
|
1543686f3d99c8f16ec4fc37b2edd70b2a3e29a5
|
[
"MIT"
] | null | null | null |
"""Data handler for HACS."""
import asyncio
import os
from homeassistant.core import callback
from custom_components.hacs.helpers.classes.manifest import HacsManifest
from custom_components.hacs.helpers.functions.register_repository import (
register_repository,
)
from custom_components.hacs.helpers.functions.store import (
async_load_from_store,
async_save_to_store,
async_save_to_store_default_encoder,
get_store_for_key,
)
from custom_components.hacs.share import get_hacs
from custom_components.hacs.utils.logger import getLogger
def update_repository_from_storage(repository, storage_data):
"""Merge in data from storage into the repo data."""
repository.data.memorize_storage(storage_data)
repository.data.update_data(storage_data)
if repository.data.installed:
return
repository.logger.debug("%s Should be installed but is not... Fixing that!", repository)
repository.data.installed = True
class HacsData:
"""HacsData class."""
def __init__(self):
"""Initialize."""
self.logger = getLogger()
self.hacs = get_hacs()
self.content = {}
async def async_write(self):
"""Write content to the store files."""
if self.hacs.status.background_task or self.hacs.system.disabled:
return
self.logger.debug("Saving data")
# Hacs
await async_save_to_store(
self.hacs.hass,
"hacs",
{
"view": self.hacs.configuration.frontend_mode,
"compact": self.hacs.configuration.frontend_compact,
"onboarding_done": self.hacs.configuration.onboarding_done,
"archived_repositories": self.hacs.common.archived_repositories,
"renamed_repositories": self.hacs.common.renamed_repositories,
},
)
await self._async_store_content_and_repos()
for event in ("hacs/repository", "hacs/config"):
self.hacs.hass.bus.async_fire(event, {})
async def _async_store_content_and_repos(self): # bb: ignore
"""Store the main repos file and each repo that is out of date."""
# Repositories
self.content = {}
# Not run concurrently since this is bound by disk I/O
for repository in self.hacs.repositories.list_all:
await self.async_store_repository_data(repository)
await async_save_to_store(self.hacs.hass, "repositories", self.content)
async def async_store_repository_data(self, repository):
repository_manifest = repository.repository_manifest.manifest
data = {
"authors": repository.data.authors,
"category": repository.data.category,
"description": repository.data.description,
"domain": repository.data.domain,
"downloads": repository.data.downloads,
"etag_repository": repository.data.etag_repository,
"full_name": repository.data.full_name,
"first_install": repository.status.first_install,
"installed_commit": repository.data.installed_commit,
"installed": repository.data.installed,
"last_commit": repository.data.last_commit,
"last_release_tag": repository.data.last_version,
"last_updated": repository.data.last_updated,
"name": repository.data.name,
"new": repository.data.new,
"repository_manifest": repository_manifest,
"selected_tag": repository.data.selected_tag,
"show_beta": repository.data.show_beta,
"stars": repository.data.stargazers_count,
"topics": repository.data.topics,
"version_installed": repository.data.installed_version,
}
self.content[str(repository.data.id)] = data
if (
repository.data.installed
and (repository.data.installed_commit or repository.data.installed_version)
and (export := repository.data.export_data())
):
# export_data will return `None` if the memorized
# data is already up to date which allows us to avoid
# writing data that is already up to date or generating
# executor jobs to check the data on disk to see
# if a write is needed.
await async_save_to_store_default_encoder(
self.hacs.hass,
f"hacs/{repository.data.id}.hacs",
export,
)
repository.data.memorize_storage(export)
async def restore(self):
"""Restore saved data."""
hacs = await async_load_from_store(self.hacs.hass, "hacs")
repositories = await async_load_from_store(self.hacs.hass, "repositories") or {}
if not hacs and not repositories:
# Assume new install
self.hacs.status.new = True
return True
self.logger.info("Restore started")
self.hacs.status.new = False
# Hacs
self.hacs.configuration.frontend_mode = hacs.get("view", "Grid")
self.hacs.configuration.frontend_compact = hacs.get("compact", False)
self.hacs.configuration.onboarding_done = hacs.get("onboarding_done", False)
self.hacs.common.archived_repositories = hacs.get("archived_repositories", [])
self.hacs.common.renamed_repositories = {}
        # Clear out double-renamed values
renamed = hacs.get("renamed_repositories", {})
for entry in renamed:
value = renamed.get(entry)
if value not in renamed:
self.hacs.common.renamed_repositories[entry] = value
hass = self.hacs.hass
stores = {}
try:
await self.register_unknown_repositories(repositories)
for entry, repo_data in repositories.items():
if entry == "0":
# Ignore repositories with ID 0
self.logger.debug("Found repository with ID %s - %s", entry, repo_data)
continue
if self.async_restore_repository(entry, repo_data):
stores[entry] = get_store_for_key(hass, f"hacs/{entry}.hacs")
def _load_from_storage():
for entry, store in stores.items():
if os.path.exists(store.path) and (data := store.load()):
if (full_name := data.get("full_name")) and (
renamed := self.hacs.common.renamed_repositories.get(full_name)
) is not None:
data["full_name"] = renamed
update_repository_from_storage(
self.hacs.repositories.get_by_id(entry), data
)
await hass.async_add_executor_job(_load_from_storage)
self.logger.info("Restore done")
except (Exception, BaseException) as exception: # pylint: disable=broad-except
self.logger.critical(f"[{exception}] Restore Failed!", exc_info=exception)
return False
return True
async def register_unknown_repositories(self, repositories):
"""Registry any unknown repositories."""
register_tasks = [
register_repository(
full_name=repo_data["full_name"],
category=repo_data["category"],
check=False,
repo_id=entry,
)
for entry, repo_data in repositories.items()
if entry != "0" and not self.hacs.repositories.is_registered(repository_id=entry)
]
if register_tasks:
await asyncio.gather(*register_tasks)
@callback
def async_restore_repository(self, entry, repository_data):
full_name = repository_data["full_name"]
if not (repository := self.hacs.repositories.get_by_full_name(full_name)):
self.logger.error(f"Did not find {full_name} ({entry})")
return False
# Restore repository attributes
self.hacs.async_set_repository_id(repository, entry)
repository.data.authors = repository_data.get("authors", [])
repository.data.description = repository_data.get("description")
repository.releases.last_release_object_downloads = repository_data.get("downloads")
repository.data.last_updated = repository_data.get("last_updated")
repository.data.etag_repository = repository_data.get("etag_repository")
repository.data.topics = repository_data.get("topics", [])
repository.data.domain = repository_data.get("domain", None)
repository.data.stargazers_count = repository_data.get("stars", 0)
repository.releases.last_release = repository_data.get("last_release_tag")
repository.data.hide = repository_data.get("hide", False)
repository.data.installed = repository_data.get("installed", False)
repository.data.new = repository_data.get("new", True)
repository.data.selected_tag = repository_data.get("selected_tag")
repository.data.show_beta = repository_data.get("show_beta", False)
repository.data.last_version = repository_data.get("last_release_tag")
repository.data.last_commit = repository_data.get("last_commit")
repository.data.installed_version = repository_data.get("version_installed")
repository.data.installed_commit = repository_data.get("installed_commit")
repository.repository_manifest = HacsManifest.from_dict(
repository_data.get("repository_manifest", {})
)
if repository.data.installed:
repository.status.first_install = False
if full_name == "hacs/integration":
repository.data.installed_version = self.hacs.version
repository.data.installed = True
return True
| 42.995652
| 93
| 0.639498
|
1167fb36a6faa95ef144f5b8b1ff42ee4bcfda3c
| 361
|
py
|
Python
|
2-2_Q2.py
|
Soooyeon-Kim/Algorithm
|
28a191d7382d9c3bb6d9afb19f4cff642c3aec03
|
[
"MIT"
] | null | null | null |
2-2_Q2.py
|
Soooyeon-Kim/Algorithm
|
28a191d7382d9c3bb6d9afb19f4cff642c3aec03
|
[
"MIT"
] | null | null | null |
2-2_Q2.py
|
Soooyeon-Kim/Algorithm
|
28a191d7382d9c3bb6d9afb19f4cff642c3aec03
|
[
"MIT"
] | null | null | null |
# Week 2 practice, problem 2: lining up students
def lining(n):
MOD = int(1e9 + 7)
dp = [[0, 0] for i in range(101)]
dp[1][0] = 1
dp[1][1] = 1
for i in range(2, 101):
        dp[i][0] = (dp[i - 1][0] + dp[i - 1][1]) % MOD  # line ends with a girl
        dp[i][1] = dp[i - 1][0]  # line ends with a boy -> only a girl may stand directly in front of him!!!
return (dp[n][0] + dp[n][1]) % MOD
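# --- Illustrative check (not part of the original file) ---
# The recurrence is Fibonacci-like: each total is the sum of the two
# preceding totals, so lining(1..4) should give 2, 3, 5, 8.
if __name__ == "__main__":
    assert [lining(n) for n in range(1, 5)] == [2, 3, 5, 8]
    print(lining(100))  # largest n supported by the dp table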
| 27.769231
| 68
| 0.401662
|
a41d247dd75d806c85a974f8352f921796f5c206
| 186
|
py
|
Python
|
benchmarking/framework/run_all_t.py
|
coyizumi/nvme-hpe
|
d2be43668a92891e81150232e17e4fa6a415f398
|
[
"MIT"
] | null | null | null |
benchmarking/framework/run_all_t.py
|
coyizumi/nvme-hpe
|
d2be43668a92891e81150232e17e4fa6a415f398
|
[
"MIT"
] | null | null | null |
benchmarking/framework/run_all_t.py
|
coyizumi/nvme-hpe
|
d2be43668a92891e81150232e17e4fa6a415f398
|
[
"MIT"
] | null | null | null |
# Authors: Coy Humphrey, Jayden Navarro
# Project: HPE UCSC Senior Design 2016
# Date: 4-7-2016
import ib_send_bw
from framework import setDirectory
setDirectory()
ib_send_bw.run_t()
| 16.909091
| 39
| 0.77957
|
89320d8c052e0ecffa026da1e3dfe47ea33fc16b
| 8,696
|
py
|
Python
|
concepts/formats.py
|
ymizoguchi/concepts
|
7c7533a90e6dbd55d15f52f95f7f0f99e1ca6f47
|
[
"MIT"
] | null | null | null |
concepts/formats.py
|
ymizoguchi/concepts
|
7c7533a90e6dbd55d15f52f95f7f0f99e1ca6f47
|
[
"MIT"
] | null | null | null |
concepts/formats.py
|
ymizoguchi/concepts
|
7c7533a90e6dbd55d15f52f95f7f0f99e1ca6f47
|
[
"MIT"
] | 1
|
2020-10-13T12:44:46.000Z
|
2020-10-13T12:44:46.000Z
|
# formats.py - parse and serialize FCA context tables
"""Parse and serialize formal contexts in different formats."""
import io
import os
import csv
import contextlib
from ._compat import PY2, text_type, zip, with_metaclass, StringIO
from . import _compat_csv
from . import tools
__all__ = ['Format']
class FormatMeta(type):
"""Collect and retrieve concrete ``Format`` subclasses by name."""
_map = {}
by_suffix = {}
def __init__(self, name, bases, dct): # noqa: N804
if not dct.get('__abstract__'):
if 'name' not in dct:
self.name = name.lower()
if 'suffix' in dct:
self.by_suffix[self.suffix] = self.name
self._map[self.name] = self
def __getitem__(self, name): # noqa: N804
try:
return self._map[name.lower()]
except KeyError:
raise KeyError('%r unknown format: %r' % (self, name))
def infer_format(self, filename, frmat=None): # noqa: N804
_, suffix = os.path.splitext(filename)
try:
return self.by_suffix[suffix.lower()]
except KeyError:
raise ValueError('cannot infer file format from filename suffix'
' %r, please specify ``frmat``' % (suffix,))
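# Illustrative lookups (an assumption based on the concrete classes defined
# below): Format['cxt'] resolves to the Cxt class, and
# Format.infer_format('context.csv') returns the name 'csv' via by_suffix.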
class Format(with_metaclass(FormatMeta, object)):
"""Parse and serialize formal contexts in a specific string format."""
__abstract__ = True
encoding = None
normalize_newlines = True
@staticmethod
def loads(source, **kwargs):
"""Parse source string and return ``(objects, properties, bools)``."""
raise NotImplementedError # pragma: no cover
@staticmethod
def dumps(objects, properties, bools, **kwargs):
"""Serialize ``(objects, properties, bools)`` and return string."""
raise NotImplementedError # pragma: no cover
@classmethod
def load(cls, filename, encoding):
"""Load and parse serialized objects, properties, bools from file."""
if encoding is None:
encoding = cls.encoding
with io.open(filename, 'r', encoding=encoding) as fd:
source = fd.read()
if cls.normalize_newlines:
source = source.replace('\r\n', '\n').replace('\r', '\n')
return cls.loads(source)
@classmethod
def dump(cls, filename, objects, properties, bools, encoding):
"""Write serialized objects, properties, bools to file."""
if encoding is None:
encoding = cls.encoding
source = cls.dumps(objects, properties, bools)
if PY2:
source = unicode(source)
with io.open(filename, 'w', encoding=encoding) as fd:
fd.write(source)
class Cxt(Format):
"""Formal context in the classic CXT format."""
suffix = '.cxt'
@staticmethod
def loads(source):
b, yx, table = source.strip().split('\n\n')
y, x = (int(i) for i in yx.split())
lines = [l.strip() for l in table.strip().split('\n')]
objects = lines[:y]
properties = lines[y:y + x]
bools = [tuple(f == 'X' for f in l) for l in lines[y + x:]]
return objects, properties, bools
@staticmethod
def dumps(objects, properties, bools):
result = ['B', '', '%d' % len(objects), '%d' % len(properties), '']
result.extend(objects)
result.extend(properties)
result.extend(''.join('X' if b else '.' for b in intent)
for intent in bools)
result.append('')
return '\n'.join(result)
class Table(Format):
"""Formal context as ASCII-art style table."""
suffix = '.txt'
@staticmethod
def escape(item):
return text_type(item).encode('ascii', 'backslashreplace')
@staticmethod
def loads(source):
lines = (l.partition('#')[0].strip() for l in source.splitlines())
lines = list(filter(None, lines))
properties = [p.strip() for p in lines[0].strip('|').split('|')]
table = [(obj.strip(),
tuple(bool(f.strip()) for f in flags.strip('|').split('|')))
for obj, flags in
(objflags.partition('|')[::2] for objflags in lines[1:])]
objects, bools = zip(*table)
return objects, properties, bools
@staticmethod
def dumps(objects, properties, bools, escape=False, indent=0):
if escape:
objects = list(map(Table.escape, objects))
properties = list(map(Table.escape, properties))
wd = [tools.max_len(objects)]
wd.extend(map(len, properties))
tmpl = ' ' * indent + '|'.join('%%-%ds' % w for w in wd) + '|'
result = [tmpl % (('',) + tuple(properties))]
result.extend(tmpl % ((o,) + tuple('X' if b else '' for b in intent))
for o, intent in zip(objects, bools))
return '\n'.join(result)
class Csv(Format):
"""Formal context as CSV table."""
suffix = '.csv'
dialect = csv.excel
@staticmethod
def _load(reader):
objects, bools = [], []
properties = next(reader)[1:]
for cols in reader:
objects.append(cols[0])
bools.append(tuple(c == 'X' for c in cols[1:]))
return objects, properties, bools
@staticmethod
def _dump(writer, objects, properties, bools):
symbool = ('', 'X').__getitem__
writer.writerow([''] + list(properties))
writer.writerows([o] + list(map(symbool, bs))
for o, bs in zip(objects, bools))
@classmethod
def loads(cls, source, dialect=None):
if dialect is None:
dialect = cls.dialect
csv_reader = csv.reader
if PY2 and isinstance(source, unicode):
csv_reader = _compat_csv.UnicodeCsvReader
with contextlib.closing(StringIO(source)) as fd:
reader = csv_reader(fd, dialect)
return cls._load(reader)
@classmethod
def dumps(cls, objects, properties, bools, dialect=None):
if dialect is None:
dialect = cls.dialect
csv_writer = csv.writer
kwargs = {}
if PY2 and not all(isinstance(s, str) for s in objects + properties):
csv_writer = _compat_csv.UnicodeCsvWriter
kwargs = {'encoding': 'utf-8'}
with contextlib.closing(StringIO()) as fd:
writer = csv_writer(fd, dialect, **kwargs)
cls._dump(writer, objects, properties, bools)
result = fd.getvalue()
if 'encoding' in kwargs:
result = result.decode(kwargs['encoding'])
return result
@classmethod
def load(cls, filename, encoding, dialect=None):
if encoding is None:
encoding = cls.encoding
if dialect is None:
dialect = cls.dialect
if PY2:
if encoding is None:
with open(filename, 'rb') as fd:
reader = csv.reader(fd, dialect)
return cls._load(reader)
else:
with io.open(filename, 'r', encoding=encoding, newline='') as fd:
reader = _compat_csv.UnicodeCsvReader(fd, dialect)
return cls._load(reader)
else:
with io.open(filename, 'r', encoding=encoding, newline='') as fd:
reader = csv.reader(fd, dialect)
return cls._load(reader)
@classmethod
def dump(cls, filename, objects, properties, bools, encoding, dialect=None):
if encoding is None:
encoding = cls.encoding
if dialect is None:
dialect = cls.dialect
if PY2:
with open(filename, 'wb') as fd:
if encoding is None:
writer = csv.writer(fd, dialect)
else:
writer = _compat_csv.UnicodeCsvWriter(fd, dialect, encoding)
return cls._dump(writer, objects, properties, bools)
else:
with io.open(filename, 'w', encoding=encoding, newline='') as fd:
writer = csv.writer(fd, dialect)
return cls._dump(writer, objects, properties, bools)
class WikiTable(Format):
"""Formal context as MediaWiki markup table."""
@staticmethod
def dumps(objects, properties, bools):
result = ['{| class="featuresystem"', '!',
'!%s' % '!!'.join(properties)]
wp = list(map(len, properties))
for o, intent in zip(objects, bools):
bcells = (('X' if b else '').ljust(w) for w, b in zip(wp, intent))
result += ['|-', '!%s' % o, '|%s' % '||'.join(bcells)]
result.append('|}')
return '\n'.join(result)
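# --- Illustrative round trip (not part of the original module) ---
# A minimal sanity check of the CXT serializer/parser pair defined above;
# loads() returns each row of bools as a tuple. Because of the relative
# imports, run this via ``python -m concepts.formats``.
if __name__ == '__main__':
    objs, props = ['king', 'queen'], ['male', 'female']
    bools = [(True, False), (False, True)]
    assert Cxt.loads(Cxt.dumps(objs, props, bools)) == (objs, props, bools)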
| 32.207407
| 81
| 0.567042
|
0f15885d8a5efb1853198499898d63ec8fb4de3e
| 3,396
|
py
|
Python
|
libs/configs/DOTA1.0/CSL/cfgs_res50_dota_v24.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 187
|
2020-03-11T05:41:59.000Z
|
2022-03-28T04:44:03.000Z
|
libs/configs/DOTA1.0/CSL/cfgs_res50_dota_v24.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 13
|
2020-07-16T09:00:11.000Z
|
2021-11-05T12:15:35.000Z
|
libs/configs/DOTA1.0/CSL/cfgs_res50_dota_v24.py
|
loceyi/CSL_RetinaNet_Tensorflow
|
c2de594ca1754dfa87f7271aa01052b0d001967a
|
[
"Apache-2.0"
] | 31
|
2020-03-21T08:11:36.000Z
|
2022-03-16T09:18:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
v19 + pulse label
"""
# ------------------------------------------------
VERSION = 'RetinaNet_DOTA_1x_20191219'
NET_NAME = 'resnet50_v1d' # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 27000
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
if NET_NAME.startswith("resnet"):
weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must be in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
MUTILPY_BIAS_GRADIENT = 2.0  # if None, will not multiply
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = None
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 5e-4
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
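# Derived schedule values (illustrative, computed from the constants above):
# DECAY_STEP = [324000, 432000, 540000], MAX_ITERATION = 540000, and the
# warm-up ('WARM_SETP' spelling is as in the original) lasts
# int(27000 / 4) = 6750 iterations.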
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'DOTA' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 800
CLASS_NUM = 15
LABEL_TYPE = 2
RADUIUS = 4
OMEGA = 1
IMG_ROTATE = False
RGB2GRAY = False
VERTICAL_FLIP = False
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 180 # 180 or 90
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.4
| 30.053097
| 105
| 0.660483
|
dedea4c38a97431ea4d2d62a96a01426a1c57e1e
| 99
|
py
|
Python
|
script/SG1M-DoubleArmV7A.py
|
WRS-TDRRC/WRS-TDRRC-2020SG1
|
5656ac6f8561a77f500aaf8dbe3f3aec5ce0ebe7
|
[
"CC0-1.0"
] | 1
|
2021-09-22T00:36:24.000Z
|
2021-09-22T00:36:24.000Z
|
script/SG1M-DoubleArmV7A.py
|
WRS-TDRRC/WRS-TDRRC-2020SG1
|
5656ac6f8561a77f500aaf8dbe3f3aec5ce0ebe7
|
[
"CC0-1.0"
] | 1
|
2021-07-11T23:05:02.000Z
|
2021-07-11T23:05:02.000Z
|
script/SG1M-DoubleArmV7A.py
|
WRS-TDRRC/WRS-TDRRC-2020SG1
|
5656ac6f8561a77f500aaf8dbe3f3aec5ce0ebe7
|
[
"CC0-1.0"
] | null | null | null |
import WRSUtil
WRSUtil.loadProject(
"MultiSceneViews", "SG1M", "AGXSimulator", "DoubleArmV7A")
| 24.75
| 62
| 0.747475
|
fad198dc113a366e451f55e352666076c61621cf
| 166
|
py
|
Python
|
example/example/wsgi.py
|
valsplat/django-ajaximage
|
6e6ae53464acb1c308149f264d437246f1571288
|
[
"MIT"
] | 11
|
2018-01-29T12:57:58.000Z
|
2020-07-27T22:47:14.000Z
|
example/example/wsgi.py
|
valsplat/django-ajaximage
|
6e6ae53464acb1c308149f264d437246f1571288
|
[
"MIT"
] | 5
|
2021-03-19T03:23:05.000Z
|
2022-03-11T23:58:21.000Z
|
example/example/wsgi.py
|
valsplat/django-ajaximage
|
6e6ae53464acb1c308149f264d437246f1571288
|
[
"MIT"
] | 2
|
2019-09-03T11:22:56.000Z
|
2020-04-10T14:20:57.000Z
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 27.666667
| 67
| 0.837349
|
3254e1b86dbe0697951710615d6a0d6d1944237c
| 53,919
|
gyp
|
Python
|
net/net.gyp
|
SlimKatLegacy/android_external_chromium
|
bc611cda58cc18d0dbaa8a7aee05eb3c0742e573
|
[
"BSD-3-Clause"
] | 2
|
2017-02-20T14:25:04.000Z
|
2019-12-13T13:58:28.000Z
|
net/net.gyp
|
SlimKatLegacy/android_external_chromium
|
bc611cda58cc18d0dbaa8a7aee05eb3c0742e573
|
[
"BSD-3-Clause"
] | 2
|
2017-07-25T09:37:22.000Z
|
2017-08-04T07:18:56.000Z
|
net/net.gyp
|
SlimKatLegacy/android_external_chromium
|
bc611cda58cc18d0dbaa8a7aee05eb3c0742e573
|
[
"BSD-3-Clause"
] | 2
|
2020-01-12T00:55:53.000Z
|
2020-11-04T06:36:41.000Z
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'net_base',
'type': '<(library)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../sdch/sdch.gyp:sdch',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/zlib/zlib.gyp:zlib',
'net_resources',
'ssl_false_start_blacklist_process#host',
],
'sources': [
'base/address_family.h',
'base/address_list.cc',
'base/address_list.h',
'base/address_list_net_log_param.cc',
'base/address_list_net_log_param.h',
'base/asn1_util.cc',
'base/auth.cc',
'base/auth.h',
'base/backoff_entry.cc',
'base/backoff_entry.h',
'base/bandwidth_metrics.cc',
'base/bandwidth_metrics.h',
'base/cache_type.h',
'base/capturing_net_log.cc',
'base/capturing_net_log.h',
'base/cert_database.cc',
'base/cert_database.h',
'base/cert_database_mac.cc',
'base/cert_database_nss.cc',
'base/cert_database_openssl.cc',
'base/cert_database_win.cc',
'base/cert_status_flags.cc',
'base/cert_status_flags.h',
'base/cert_verifier.cc',
'base/cert_verifier.h',
'base/cert_verify_result.cc',
'base/cert_verify_result.h',
'base/completion_callback.h',
'base/connection_type_histograms.cc',
'base/connection_type_histograms.h',
'base/cookie_monster.cc',
'base/cookie_monster.h',
'base/cookie_options.h',
'base/cookie_policy.h',
'base/cookie_store.cc',
'base/cookie_store.h',
'base/crypto_module.h',
'base/crypto_module_nss.cc',
'base/crypto_module_openssl.cc',
'base/data_url.cc',
'base/data_url.h',
'base/directory_lister.cc',
'base/directory_lister.h',
'base/dns_reload_timer.cc',
'base/dns_reload_timer.h',
'base/dnssec_chain_verifier.cc',
'base/dnssec_chain_verifier.h',
'base/dnssec_keyset.cc',
'base/dnssec_keyset.h',
'base/dns_util.cc',
'base/dns_util.h',
'base/dnsrr_resolver.cc',
'base/dnsrr_resolver.h',
'base/escape.cc',
'base/escape.h',
'base/escape_icu.cc',
'base/ev_root_ca_metadata.cc',
'base/ev_root_ca_metadata.h',
'base/file_stream.h',
'base/file_stream_posix.cc',
'base/file_stream_win.cc',
'base/filter.cc',
'base/filter.h',
'base/gzip_filter.cc',
'base/gzip_filter.h',
'base/gzip_header.cc',
'base/gzip_header.h',
'base/host_cache.cc',
'base/host_cache.h',
'base/host_mapping_rules.cc',
'base/host_mapping_rules.h',
'base/host_port_pair.cc',
'base/host_port_pair.h',
'base/host_resolver.cc',
'base/host_resolver.h',
'base/host_resolver_impl.cc',
'base/host_resolver_impl.h',
'base/host_resolver_proc.cc',
'base/host_resolver_proc.h',
'base/io_buffer.cc',
'base/io_buffer.h',
'base/ip_endpoint.cc',
'base/ip_endpoint.h',
'base/keygen_handler.cc',
'base/keygen_handler.h',
'base/keygen_handler_mac.cc',
'base/keygen_handler_nss.cc',
'base/keygen_handler_openssl.cc',
'base/keygen_handler_win.cc',
'base/listen_socket.cc',
'base/listen_socket.h',
'base/load_flags.h',
'base/load_flags_list.h',
'base/load_states.h',
'base/mapped_host_resolver.cc',
'base/mapped_host_resolver.h',
'base/mime_sniffer.cc',
'base/mime_sniffer.h',
'base/mime_util.cc',
'base/mime_util.h',
# TODO(eroman): move this into its own test-support target.
'base/mock_host_resolver.cc',
'base/mock_host_resolver.h',
'base/net_error_list.h',
'base/net_errors.cc',
'base/net_errors.h',
'base/net_errors_posix.cc',
'base/net_errors_win.cc',
'base/net_log.cc',
'base/net_log.h',
'base/net_log_event_type_list.h',
'base/net_log_source_type_list.h',
'base/net_module.cc',
'base/net_module.h',
'base/net_switches.cc',
'base/net_switches.h',
'base/net_util.cc',
'base/net_util.h',
'base/net_util_posix.cc',
'base/net_util_win.cc',
'base/network_change_notifier.cc',
'base/network_change_notifier.h',
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_linux.h',
'base/network_change_notifier_mac.cc',
'base/network_change_notifier_mac.h',
'base/network_change_notifier_netlink_linux.cc',
'base/network_change_notifier_netlink_linux.h',
'base/network_change_notifier_win.cc',
'base/network_change_notifier_win.h',
'base/network_config_watcher_mac.cc',
'base/network_config_watcher_mac.h',
'base/network_delegate.cc',
'base/network_delegate.h',
'base/nss_memio.c',
'base/nss_memio.h',
'base/openssl_memory_private_key_store.cc',
'base/openssl_private_key_store.h',
'base/pem_tokenizer.cc',
'base/pem_tokenizer.h',
'base/platform_mime_util.h',
# TODO(tc): gnome-vfs? xdgmime? /etc/mime.types?
'base/platform_mime_util_linux.cc',
'base/platform_mime_util_mac.cc',
'base/platform_mime_util_win.cc',
'base/registry_controlled_domain.cc',
'base/registry_controlled_domain.h',
'base/scoped_cert_chain_context.h',
'base/sdch_filter.cc',
'base/sdch_filter.h',
'base/sdch_manager.cc',
'base/sdch_manager.h',
'base/ssl_cert_request_info.cc',
'base/ssl_cert_request_info.h',
'base/ssl_cipher_suite_names.cc',
'base/ssl_cipher_suite_names.h',
'base/ssl_client_auth_cache.cc',
'base/ssl_client_auth_cache.h',
'base/ssl_config_service.cc',
'base/ssl_config_service.h',
'base/ssl_config_service_defaults.cc',
'base/ssl_config_service_defaults.h',
'base/ssl_false_start_blacklist.cc',
'base/ssl_info.cc',
'base/ssl_info.h',
'base/static_cookie_policy.cc',
'base/static_cookie_policy.h',
'base/test_root_certs.cc',
'base/test_root_certs.h',
'base/test_root_certs_mac.cc',
'base/test_root_certs_nss.cc',
'base/test_root_certs_openssl.cc',
'base/test_root_certs_win.cc',
'base/transport_security_state.cc',
'base/transport_security_state.h',
'base/sys_addrinfo.h',
'base/sys_byteorder.h',
'base/upload_data.cc',
'base/upload_data.h',
'base/upload_data_stream.cc',
'base/upload_data_stream.h',
'base/winsock_init.cc',
'base/winsock_init.h',
'base/winsock_util.cc',
'base/winsock_util.h',
'base/x509_certificate.cc',
'base/x509_certificate.h',
'base/x509_certificate_mac.cc',
'base/x509_certificate_nss.cc',
'base/x509_certificate_openssl.cc',
'base/x509_certificate_win.cc',
'base/x509_cert_types.cc',
'base/x509_cert_types.h',
'base/x509_cert_types_mac.cc',
'base/x509_openssl_util.cc',
'base/x509_openssl_util.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
'export_dependent_settings': [
'../base/base.gyp:base',
],
'actions': [
{
'action_name': 'ssl_false_start_blacklist',
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)ssl_false_start_blacklist_process<(EXECUTABLE_SUFFIX)',
'base/ssl_false_start_blacklist.txt',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/net/base/ssl_false_start_blacklist_data.cc',
],
'action':
['<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)ssl_false_start_blacklist_process<(EXECUTABLE_SUFFIX)',
'base/ssl_false_start_blacklist.txt',
'<(SHARED_INTERMEDIATE_DIR)/net/base/ssl_false_start_blacklist_data.cc',
],
'message': 'Generating SSL False Start blacklist',
'process_outputs_as_sources': 1,
},
],
'conditions': [
[ 'OS == "linux" or OS == "freebsd" or OS == "openbsd"', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gdk',
'../build/linux/system.gyp:libresolv',
],
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
}, { # else: not using openssl. Use NSS.
'dependencies': [
'../build/linux/system.gyp:nss',
],
}],
],
},
{ # else: OS is not in the above list
'sources!': [
'base/cert_database_nss.cc',
'base/crypto_module_nss.cc',
'base/keygen_handler_nss.cc',
'base/test_root_certs_nss.cc',
'base/x509_certificate_nss.cc',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
],
[ 'use_openssl==1', {
'sources!': [
'base/cert_database_nss.cc',
'base/crypto_module_nss.cc',
'base/dnssec_keyset.cc',
'base/dnssec_keyset.h',
'base/keygen_handler_nss.cc',
'base/nss_memio.c',
'base/nss_memio.h',
'base/test_root_certs_nss.cc',
'base/x509_certificate_nss.cc',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsNSSCertTrust.cpp',
'third_party/mozilla_security_manager/nsNSSCertTrust.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
{ # else: not using openssl.
'sources!': [
'base/cert_database_openssl.cc',
'base/crypto_module_openssl.cc',
'base/keygen_handler_openssl.cc',
'base/openssl_memory_private_key_store.cc',
'base/openssl_private_key_store.h',
'base/test_root_certs_openssl.cc',
'base/x509_certificate_openssl.cc',
'base/x509_openssl_util.cc',
'base/x509_openssl_util.h',
],
},
],
[ 'OS == "win"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
'tld_cleanup',
],
},
{ # else: OS != "win"
'dependencies': [
'../third_party/libevent/libevent.gyp:libevent',
],
'sources!': [
'base/winsock_init.cc',
'base/winsock_util.cc',
],
},
],
[ 'OS == "mac"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
'$(SDKROOT)/usr/lib/libresolv.dylib',
]
},
},
],
],
},
{
'target_name': 'net',
'type': '<(library)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../sdch/sdch.gyp:sdch',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/zlib/zlib.gyp:zlib',
'net_base',
'net_resources',
],
'sources': [
'disk_cache/addr.cc',
'disk_cache/addr.h',
'disk_cache/backend_impl.cc',
'disk_cache/backend_impl.h',
'disk_cache/bitmap.cc',
'disk_cache/bitmap.h',
'disk_cache/block_files.cc',
'disk_cache/block_files.h',
'disk_cache/cache_util.h',
'disk_cache/cache_util_posix.cc',
'disk_cache/cache_util_win.cc',
'disk_cache/disk_cache.h',
'disk_cache/disk_format.cc',
'disk_cache/disk_format.h',
'disk_cache/entry_impl.cc',
'disk_cache/entry_impl.h',
'disk_cache/errors.h',
'disk_cache/eviction.cc',
'disk_cache/eviction.h',
'disk_cache/experiments.h',
'disk_cache/net_log_parameters.cc',
'disk_cache/net_log_parameters.h',
'disk_cache/file.cc',
'disk_cache/file.h',
'disk_cache/file_block.h',
'disk_cache/file_lock.cc',
'disk_cache/file_lock.h',
'disk_cache/file_posix.cc',
'disk_cache/file_win.cc',
'disk_cache/hash.cc',
'disk_cache/hash.h',
'disk_cache/histogram_macros.h',
'disk_cache/in_flight_backend_io.cc',
'disk_cache/in_flight_backend_io.h',
'disk_cache/in_flight_io.cc',
'disk_cache/in_flight_io.h',
'disk_cache/mapped_file.h',
'disk_cache/mapped_file_posix.cc',
'disk_cache/mapped_file_win.cc',
'disk_cache/mem_backend_impl.cc',
'disk_cache/mem_backend_impl.h',
'disk_cache/mem_entry_impl.cc',
'disk_cache/mem_entry_impl.h',
'disk_cache/mem_rankings.cc',
'disk_cache/mem_rankings.h',
'disk_cache/rankings.cc',
'disk_cache/rankings.h',
'disk_cache/sparse_control.cc',
'disk_cache/sparse_control.h',
'disk_cache/stats.cc',
'disk_cache/stats.h',
'disk_cache/stats_histogram.cc',
'disk_cache/stats_histogram.h',
'disk_cache/storage_block-inl.h',
'disk_cache/storage_block.h',
'disk_cache/trace.cc',
'disk_cache/trace.h',
'ftp/ftp_auth_cache.cc',
'ftp/ftp_auth_cache.h',
'ftp/ftp_ctrl_response_buffer.cc',
'ftp/ftp_ctrl_response_buffer.h',
'ftp/ftp_directory_listing_parser.cc',
'ftp/ftp_directory_listing_parser.h',
'ftp/ftp_directory_listing_parser_ls.cc',
'ftp/ftp_directory_listing_parser_ls.h',
'ftp/ftp_directory_listing_parser_netware.cc',
'ftp/ftp_directory_listing_parser_netware.h',
'ftp/ftp_directory_listing_parser_vms.cc',
'ftp/ftp_directory_listing_parser_vms.h',
'ftp/ftp_directory_listing_parser_windows.cc',
'ftp/ftp_directory_listing_parser_windows.h',
'ftp/ftp_network_layer.cc',
'ftp/ftp_network_layer.h',
'ftp/ftp_network_session.cc',
'ftp/ftp_network_session.h',
'ftp/ftp_network_transaction.cc',
'ftp/ftp_network_transaction.h',
'ftp/ftp_request_info.h',
'ftp/ftp_response_info.cc',
'ftp/ftp_response_info.h',
'ftp/ftp_server_type_histograms.cc',
'ftp/ftp_server_type_histograms.h',
'ftp/ftp_transaction.h',
'ftp/ftp_transaction_factory.h',
'ftp/ftp_util.cc',
'ftp/ftp_util.h',
'http/des.cc',
'http/des.h',
'http/disk_cache_based_ssl_host_info.cc',
'http/disk_cache_based_ssl_host_info.h',
'http/http_alternate_protocols.cc',
'http/http_alternate_protocols.h',
'http/http_atom_list.h',
'http/http_auth.cc',
'http/http_auth.h',
'http/http_auth_cache.cc',
'http/http_auth_cache.h',
'http/http_auth_controller.cc',
'http/http_auth_controller.h',
'http/http_auth_filter.cc',
'http/http_auth_filter.h',
'http/http_auth_filter_win.h',
'http/http_auth_gssapi_posix.cc',
'http/http_auth_gssapi_posix.h',
'http/http_auth_handler.cc',
'http/http_auth_handler.h',
'http/http_auth_handler_basic.cc',
'http/http_auth_handler_basic.h',
'http/http_auth_handler_digest.cc',
'http/http_auth_handler_digest.h',
'http/http_auth_handler_factory.cc',
'http/http_auth_handler_factory.h',
'http/http_auth_handler_negotiate.h',
'http/http_auth_handler_negotiate.cc',
'http/http_auth_handler_ntlm.cc',
'http/http_auth_handler_ntlm.h',
'http/http_auth_handler_ntlm_portable.cc',
'http/http_auth_handler_ntlm_win.cc',
'http/http_auth_sspi_win.cc',
'http/http_auth_sspi_win.h',
'http/http_basic_stream.cc',
'http/http_basic_stream.h',
'http/http_byte_range.cc',
'http/http_byte_range.h',
'http/http_cache.cc',
'http/http_cache.h',
'http/http_cache_transaction.cc',
'http/http_cache_transaction.h',
'http/http_chunked_decoder.cc',
'http/http_chunked_decoder.h',
'http/http_net_log_params.cc',
'http/http_net_log_params.h',
'http/http_network_layer.cc',
'http/http_network_layer.h',
'http/http_network_session.cc',
'http/http_network_session.h',
'http/http_network_session_peer.cc',
'http/http_network_session_peer.h',
'http/http_network_transaction.cc',
'http/http_network_transaction.h',
'http/http_request_headers.cc',
'http/http_request_headers.h',
'http/http_request_info.cc',
'http/http_request_info.h',
'http/http_response_body_drainer.cc',
'http/http_response_body_drainer.h',
'http/http_response_headers.cc',
'http/http_response_headers.h',
'http/http_response_info.cc',
'http/http_response_info.h',
'http/http_stream.h',
'http/http_stream_factory.cc',
'http/http_stream_factory.h',
'http/http_stream_factory_impl.cc',
'http/http_stream_factory_impl.h',
'http/http_stream_factory_impl_job.cc',
'http/http_stream_factory_impl_job.h',
'http/http_stream_factory_impl_request.cc',
'http/http_stream_factory_impl_request.h',
'http/http_stream_parser.cc',
'http/http_stream_parser.h',
'http/http_transaction.h',
'http/http_transaction_factory.h',
'http/url_security_manager.h',
'http/url_security_manager.cc',
'http/url_security_manager_posix.cc',
'http/url_security_manager_win.cc',
'http/http_proxy_client_socket.cc',
'http/http_proxy_client_socket.h',
'http/http_proxy_client_socket_pool.cc',
'http/http_proxy_client_socket_pool.h',
'http/http_proxy_utils.cc',
'http/http_proxy_utils.h',
'http/http_util.cc',
'http/http_util_icu.cc',
'http/http_util.h',
'http/http_vary_data.cc',
'http/http_vary_data.h',
'http/http_version.h',
'http/md4.cc',
'http/md4.h',
'http/partial_data.cc',
'http/partial_data.h',
'http/proxy_client_socket.h',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'proxy/init_proxy_resolver.cc',
'proxy/init_proxy_resolver.h',
'proxy/multi_threaded_proxy_resolver.cc',
'proxy/multi_threaded_proxy_resolver.h',
'proxy/polling_proxy_config_service.cc',
'proxy/polling_proxy_config_service.h',
'proxy/proxy_bypass_rules.cc',
'proxy/proxy_bypass_rules.h',
'proxy/proxy_config.cc',
'proxy/proxy_config.h',
'proxy/proxy_config_service.h',
'proxy/proxy_config_service_fixed.cc',
'proxy/proxy_config_service_fixed.h',
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
'proxy/proxy_config_service_mac.cc',
'proxy/proxy_config_service_mac.h',
'proxy/proxy_config_service_win.cc',
'proxy/proxy_config_service_win.h',
'proxy/proxy_info.cc',
'proxy/proxy_info.h',
'proxy/proxy_list.cc',
'proxy/proxy_list.h',
'proxy/proxy_resolver.h',
'proxy/proxy_resolver_js_bindings.cc',
'proxy/proxy_resolver_js_bindings.h',
'proxy/proxy_resolver_mac.cc',
'proxy/proxy_resolver_mac.h',
'proxy/proxy_resolver_request_context.h',
'proxy/proxy_resolver_script.h',
'proxy/proxy_resolver_script_data.cc',
'proxy/proxy_resolver_script_data.h',
'proxy/proxy_resolver_v8.cc',
'proxy/proxy_resolver_v8.h',
'proxy/proxy_resolver_winhttp.cc',
'proxy/proxy_resolver_winhttp.h',
'proxy/proxy_retry_info.h',
'proxy/proxy_script_fetcher.h',
'proxy/proxy_script_fetcher_impl.cc',
'proxy/proxy_script_fetcher_impl.h',
'proxy/proxy_server.cc',
'proxy/proxy_server_mac.cc',
'proxy/proxy_server.h',
'proxy/proxy_service.cc',
'proxy/proxy_service.h',
'proxy/sync_host_resolver_bridge.cc',
'proxy/sync_host_resolver_bridge.h',
'socket/client_socket.cc',
'socket/client_socket.h',
'socket/client_socket_factory.cc',
'socket/client_socket_factory.h',
'socket/client_socket_handle.cc',
'socket/client_socket_handle.h',
'socket/client_socket_pool.h',
'socket/client_socket_pool.cc',
'socket/client_socket_pool_base.cc',
'socket/client_socket_pool_base.h',
'socket/client_socket_pool_histograms.cc',
'socket/client_socket_pool_histograms.h',
'socket/client_socket_pool_manager.cc',
'socket/client_socket_pool_manager.h',
'socket/dns_cert_provenance_checker.cc',
'socket/dns_cert_provenance_checker.h',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/server_socket.h',
'socket/socket.h',
'socket/socks5_client_socket.cc',
'socket/socks5_client_socket.h',
'socket/socks_client_socket.cc',
'socket/socks_client_socket.h',
'socket/socks_client_socket_pool.cc',
'socket/socks_client_socket_pool.h',
'socket/ssl_client_socket.cc',
'socket/ssl_client_socket.h',
'socket/ssl_client_socket_mac.cc',
'socket/ssl_client_socket_mac.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_client_socket_pool.cc',
'socket/ssl_client_socket_pool.h',
'socket/ssl_client_socket_win.cc',
'socket/ssl_client_socket_win.h',
'socket/ssl_error_params.cc',
'socket/ssl_error_params.h',
'socket/ssl_server_socket.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
'socket/ssl_server_socket_openssl.cc',
'socket/ssl_host_info.cc',
'socket/ssl_host_info.h',
'socket/tcp_client_socket.cc',
'socket/tcp_client_socket.h',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_client_socket_libevent.h',
'socket/tcp_client_socket_win.cc',
'socket/tcp_client_socket_win.h',
'socket/tcp_server_socket.h',
'socket/tcp_server_socket_libevent.cc',
'socket/tcp_server_socket_libevent.h',
'socket/tcp_server_socket_win.cc',
'socket/tcp_server_socket_win.h',
'socket/transport_client_socket_pool.cc',
'socket/transport_client_socket_pool.h',
'socket_stream/socket_stream.cc',
'socket_stream/socket_stream.h',
'socket_stream/socket_stream_job.cc',
'socket_stream/socket_stream_job.h',
'socket_stream/socket_stream_job_manager.cc',
'socket_stream/socket_stream_job_manager.h',
'socket_stream/socket_stream_metrics.cc',
'socket_stream/socket_stream_metrics.h',
'spdy/spdy_bitmasks.h',
'spdy/spdy_frame_builder.cc',
'spdy/spdy_frame_builder.h',
'spdy/spdy_framer.cc',
'spdy/spdy_framer.h',
'spdy/spdy_http_stream.cc',
'spdy/spdy_http_stream.h',
'spdy/spdy_http_utils.cc',
'spdy/spdy_http_utils.h',
'spdy/spdy_io_buffer.cc',
'spdy/spdy_io_buffer.h',
'spdy/spdy_protocol.h',
'spdy/spdy_proxy_client_socket.cc',
'spdy/spdy_proxy_client_socket.h',
'spdy/spdy_session.cc',
'spdy/spdy_session.h',
'spdy/spdy_session_pool.cc',
'spdy/spdy_session_pool.h',
'spdy/spdy_settings_storage.cc',
'spdy/spdy_settings_storage.h',
'spdy/spdy_stream.cc',
'spdy/spdy_stream.h',
'udp/datagram_client_socket.h',
'udp/datagram_server_socket.h',
'udp/datagram_socket.h',
'udp/udp_client_socket.cc',
'udp/udp_client_socket.h',
'udp/udp_server_socket.cc',
'udp/udp_server_socket.h',
'udp/udp_socket.h',
'udp/udp_socket_libevent.cc',
'udp/udp_socket_libevent.h',
'udp/udp_socket_win.cc',
'udp/udp_socket_win.h',
'url_request/https_prober.h',
'url_request/https_prober.cc',
'url_request/url_request.cc',
'url_request/url_request.h',
'url_request/url_request_about_job.cc',
'url_request/url_request_about_job.h',
'url_request/url_request_context.cc',
'url_request/url_request_context.h',
'url_request/url_request_context_getter.cc',
'url_request/url_request_context_getter.h',
'url_request/url_request_context_storage.cc',
'url_request/url_request_context_storage.h',
'url_request/url_request_data_job.cc',
'url_request/url_request_data_job.h',
'url_request/url_request_error_job.cc',
'url_request/url_request_error_job.h',
'url_request/url_request_file_dir_job.cc',
'url_request/url_request_file_dir_job.h',
'url_request/url_request_file_job.cc',
'url_request/url_request_file_job.h',
'url_request/url_request_filter.cc',
'url_request/url_request_filter.h',
'url_request/url_request_ftp_job.cc',
'url_request/url_request_ftp_job.h',
'url_request/url_request_http_job.cc',
'url_request/url_request_http_job.h',
'url_request/url_request_job.cc',
'url_request/url_request_job.h',
'url_request/url_request_job_manager.cc',
'url_request/url_request_job_manager.h',
'url_request/url_request_job_tracker.cc',
'url_request/url_request_job_tracker.h',
'url_request/url_request_netlog_params.cc',
'url_request/url_request_netlog_params.h',
'url_request/url_request_redirect_job.cc',
'url_request/url_request_redirect_job.h',
'url_request/url_request_simple_job.cc',
'url_request/url_request_simple_job.h',
'url_request/url_request_status.h',
'url_request/url_request_test_job.cc',
'url_request/url_request_test_job.h',
'url_request/url_request_throttler_entry.cc',
'url_request/url_request_throttler_entry.h',
'url_request/url_request_throttler_entry_interface.h',
'url_request/url_request_throttler_header_adapter.h',
'url_request/url_request_throttler_header_adapter.cc',
'url_request/url_request_throttler_header_interface.h',
'url_request/url_request_throttler_manager.cc',
'url_request/url_request_throttler_manager.h',
'url_request/view_cache_helper.cc',
'url_request/view_cache_helper.h',
'websockets/websocket.cc',
'websockets/websocket.h',
'websockets/websocket_frame_handler.cc',
'websockets/websocket_frame_handler.h',
'websockets/websocket_handshake.cc',
'websockets/websocket_handshake.h',
'websockets/websocket_handshake_draft75.cc',
'websockets/websocket_handshake_draft75.h',
'websockets/websocket_handshake_handler.cc',
'websockets/websocket_handshake_handler.h',
'websockets/websocket_job.cc',
'websockets/websocket_job.h',
'websockets/websocket_net_log_params.cc',
'websockets/websocket_net_log_params.h',
'websockets/websocket_throttle.cc',
'websockets/websocket_throttle.h',
],
'export_dependent_settings': [
'../base/base.gyp:base',
],
'conditions': [
['javascript_engine=="v8"', {
'dependencies': [
'../v8/tools/gyp/v8.gyp:v8',
],
}],
['chromeos==1', {
'sources!': [
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
],
}],
['use_openssl==1', {
'sources!': [
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'socket/dns_cert_provenance_check.cc',
'socket/dns_cert_provenance_check.h',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
],
},
{ # else !use_openssl: remove the unneeded files
'sources!': [
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_server_socket_openssl.cc',
],
},
],
[ 'OS == "linux" or OS == "freebsd" or OS == "openbsd"', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gdk',
],
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
},
{ # else use_openssl==0, use NSS
'dependencies': [
'../build/linux/system.gyp:nss',
],
}],
],
},
{ # else: OS is not in the above list
'sources!': [
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
],
},
],
[ 'OS == "win"', {
'sources!': [
'http/http_auth_handler_ntlm_portable.cc',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_server_socket_libevent.cc',
'udp/udp_socket_libevent.cc',
],
'dependencies': [
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:ssl',
'tld_cleanup',
],
},
{ # else: OS != "win"
'dependencies': [
'../third_party/libevent/libevent.gyp:libevent',
],
'sources!': [
'proxy/proxy_resolver_winhttp.cc',
],
},
],
[ 'OS == "mac"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:ssl',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
]
},
},
],
],
},
{
'target_name': 'net_unittests',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../crypto/crypto.gyp:crypto',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/zlib/zlib.gyp:zlib',
],
'msvs_guid': 'E99DA267-BE90-4F45-88A1-6919DB2C7567',
'sources': [
'base/address_list_unittest.cc',
'base/backoff_entry_unittest.cc',
'base/cert_database_nss_unittest.cc',
'base/cert_verifier_unittest.cc',
'base/cookie_monster_unittest.cc',
'base/data_url_unittest.cc',
'base/directory_lister_unittest.cc',
'base/dnssec_unittest.cc',
'base/dns_util_unittest.cc',
'base/dnsrr_resolver_unittest.cc',
'base/escape_unittest.cc',
'base/file_stream_unittest.cc',
'base/filter_unittest.cc',
'base/gzip_filter_unittest.cc',
'base/host_cache_unittest.cc',
'base/host_mapping_rules_unittest.cc',
'base/host_resolver_impl_unittest.cc',
'base/ip_endpoint_unittest.cc',
'base/keygen_handler_unittest.cc',
'base/listen_socket_unittest.cc',
'base/listen_socket_unittest.h',
'base/mapped_host_resolver_unittest.cc',
'base/mime_sniffer_unittest.cc',
'base/mime_util_unittest.cc',
'base/mock_filter_context.cc',
'base/mock_filter_context.h',
'base/net_log_unittest.cc',
'base/net_log_unittest.h',
'base/net_util_unittest.cc',
'base/pem_tokenizer_unittest.cc',
'base/registry_controlled_domain_unittest.cc',
'base/run_all_unittests.cc',
'base/sdch_filter_unittest.cc',
'base/ssl_cipher_suite_names_unittest.cc',
'base/ssl_client_auth_cache_unittest.cc',
'base/ssl_config_service_unittest.cc',
'base/ssl_false_start_blacklist_unittest.cc',
'base/static_cookie_policy_unittest.cc',
'base/transport_security_state_unittest.cc',
'base/test_certificate_data.h',
'base/test_completion_callback_unittest.cc',
'base/upload_data_stream_unittest.cc',
'base/x509_certificate_unittest.cc',
'base/x509_cert_types_mac_unittest.cc',
'disk_cache/addr_unittest.cc',
'disk_cache/backend_unittest.cc',
'disk_cache/bitmap_unittest.cc',
'disk_cache/block_files_unittest.cc',
'disk_cache/cache_util_unittest.cc',
'disk_cache/disk_cache_test_base.cc',
'disk_cache/disk_cache_test_base.h',
'disk_cache/entry_unittest.cc',
'disk_cache/mapped_file_unittest.cc',
'disk_cache/storage_block_unittest.cc',
'ftp/ftp_auth_cache_unittest.cc',
'ftp/ftp_ctrl_response_buffer_unittest.cc',
'ftp/ftp_directory_listing_parser_ls_unittest.cc',
'ftp/ftp_directory_listing_parser_netware_unittest.cc',
'ftp/ftp_directory_listing_parser_unittest.cc',
'ftp/ftp_directory_listing_parser_vms_unittest.cc',
'ftp/ftp_directory_listing_parser_windows_unittest.cc',
'ftp/ftp_network_transaction_unittest.cc',
'ftp/ftp_util_unittest.cc',
'http/des_unittest.cc',
'http/http_alternate_protocols_unittest.cc',
'http/http_auth_cache_unittest.cc',
'http/http_auth_controller_unittest.cc',
'http/http_auth_filter_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
'http/http_auth_handler_basic_unittest.cc',
'http/http_auth_handler_digest_unittest.cc',
'http/http_auth_handler_factory_unittest.cc',
'http/http_auth_handler_mock.cc',
'http/http_auth_handler_mock.h',
'http/http_auth_handler_negotiate_unittest.cc',
'http/http_auth_handler_unittest.cc',
'http/http_auth_sspi_win_unittest.cc',
'http/http_auth_unittest.cc',
'http/http_byte_range_unittest.cc',
'http/http_cache_unittest.cc',
'http/http_chunked_decoder_unittest.cc',
'http/http_network_layer_unittest.cc',
'http/http_network_transaction_unittest.cc',
'http/http_proxy_client_socket_pool_unittest.cc',
'http/http_request_headers_unittest.cc',
'http/http_response_body_drainer_unittest.cc',
'http/http_response_headers_unittest.cc',
'http/http_stream_factory_impl_unittest.cc',
'http/http_transaction_unittest.cc',
'http/http_transaction_unittest.h',
'http/http_util_unittest.cc',
'http/http_vary_data_unittest.cc',
'http/mock_allow_url_security_manager.cc',
'http/mock_allow_url_security_manager.h',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
'http/mock_sspi_library_win.h',
'http/mock_sspi_library_win.cc',
'http/url_security_manager_unittest.cc',
'proxy/init_proxy_resolver_unittest.cc',
'proxy/multi_threaded_proxy_resolver_unittest.cc',
'proxy/proxy_bypass_rules_unittest.cc',
'proxy/proxy_config_service_linux_unittest.cc',
'proxy/proxy_config_service_win_unittest.cc',
'proxy/proxy_config_unittest.cc',
'proxy/proxy_list_unittest.cc',
'proxy/proxy_resolver_js_bindings_unittest.cc',
'proxy/proxy_resolver_v8_unittest.cc',
'proxy/proxy_script_fetcher_impl_unittest.cc',
'proxy/proxy_server_unittest.cc',
'proxy/proxy_service_unittest.cc',
'proxy/sync_host_resolver_bridge_unittest.cc',
'socket/client_socket_pool_base_unittest.cc',
'socket/deterministic_socket_data_unittest.cc',
'socket/socks5_client_socket_unittest.cc',
'socket/socks_client_socket_pool_unittest.cc',
'socket/socks_client_socket_unittest.cc',
'socket/ssl_client_socket_unittest.cc',
'socket/ssl_client_socket_pool_unittest.cc',
'socket/ssl_server_socket_unittest.cc',
'socket/tcp_server_socket_unittest.cc',
'socket/transport_client_socket_pool_unittest.cc',
'socket/transport_client_socket_unittest.cc',
'socket_stream/socket_stream_metrics_unittest.cc',
'socket_stream/socket_stream_unittest.cc',
'spdy/spdy_framer_test.cc',
'spdy/spdy_http_stream_unittest.cc',
'spdy/spdy_network_transaction_unittest.cc',
'spdy/spdy_protocol_test.cc',
'spdy/spdy_proxy_client_socket_unittest.cc',
'spdy/spdy_session_unittest.cc',
'spdy/spdy_stream_unittest.cc',
'spdy/spdy_test_util.cc',
'spdy/spdy_test_util.h',
'test/python_utils_unittest.cc',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_to_filename_encoder_unittest.cc',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/dump_cache/url_utilities_unittest.cc',
'udp/udp_socket_unittest.cc',
'url_request/url_request_job_tracker_unittest.cc',
'url_request/url_request_throttler_unittest.cc',
'url_request/url_request_unittest.cc',
'url_request/view_cache_helper_unittest.cc',
'websockets/websocket_frame_handler_unittest.cc',
'websockets/websocket_handshake_draft75_unittest.cc',
'websockets/websocket_handshake_handler_unittest.cc',
'websockets/websocket_handshake_unittest.cc',
'websockets/websocket_job_unittest.cc',
'websockets/websocket_net_log_params_unittest.cc',
'websockets/websocket_throttle_unittest.cc',
'websockets/websocket_unittest.cc',
],
'conditions': [
['chromeos==1', {
'sources!': [
'proxy/proxy_config_service_linux_unittest.cc',
],
}],
[ 'OS == "linux" or OS == "freebsd" or OS == "openbsd"', {
'dependencies': [
'../build/linux/system.gyp:gtk',
'../build/linux/system.gyp:nss',
],
},
{ # else: OS is not in the above list
'sources!': [
'base/cert_database_nss_unittest.cc',
],
}
],
[ 'OS == "linux"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
[ 'use_openssl==1', {
# When building for OpenSSL, we need to exclude NSS specific tests.
# TODO(bulach): Add equivalent tests when the underlying
# functionality is ported to OpenSSL.
'sources!': [
'base/cert_database_nss_unittest.cc',
'base/dnssec_unittest.cc',
],
},
{ # else, remove openssl specific tests
'sources!': [
'base/x509_openssl_util_unittest.cc',
],
}
],
[ 'OS == "win"', {
'sources!': [
'http/http_auth_gssapi_posix_unittest.cc',
],
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
},
],
],
},
{
'target_name': 'net_perftests',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_perf',
'../testing/gtest.gyp:gtest',
],
'msvs_guid': 'AAC78796-B9A2-4CD9-BF89-09B03E92BF73',
'sources': [
'base/cookie_monster_perftest.cc',
'disk_cache/disk_cache_perftest.cc',
'proxy/proxy_resolver_perftest.cc',
],
'conditions': [
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
[ 'OS == "win"', {
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
},
],
],
},
{
'target_name': 'stress_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'disk_cache/stress_cache.cc',
],
},
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../build/temp_gyp/googleurl.gyp:googleurl',
],
'msvs_guid': 'E13045CD-7E1F-4A41-9B18-8D288B2E7B41',
'sources': [
'tools/tld_cleanup/tld_cleanup.cc',
],
},
{
'target_name': 'crash_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'msvs_guid': 'B0EE0599-2913-46A0-A847-A3EC813658D3',
'sources': [
'tools/crash_cache/crash_cache.cc',
],
},
{
'target_name': 'run_testserver',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
],
'msvs_guid': '506F2468-6B1D-48E2-A67C-9D9C6BAC0EC5',
'sources': [
'tools/testserver/run_testserver.cc',
],
},
{
'target_name': 'net_test_support',
'type': '<(library)',
'dependencies': [
'net',
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'base/cert_test_util.cc',
'base/cert_test_util.h',
'base/cookie_monster_store_test.cc',
'base/cookie_monster_store_test.h',
'base/net_test_suite.cc',
'base/net_test_suite.h',
'base/test_completion_callback.cc',
'base/test_completion_callback.h',
'disk_cache/disk_cache_test_util.cc',
'disk_cache/disk_cache_test_util.h',
'proxy/mock_proxy_resolver.cc',
'proxy/mock_proxy_resolver.h',
'proxy/proxy_config_service_common_unittest.cc',
'proxy/proxy_config_service_common_unittest.h',
'socket/socket_test_util.cc',
'socket/socket_test_util.h',
'test/python_utils.cc',
'test/python_utils.h',
'test/test_server.cc',
'test/test_server_posix.cc',
'test/test_server_win.cc',
'test/test_server.h',
'url_request/url_request_test_util.cc',
'url_request/url_request_test_util.h',
],
'conditions': [
['inside_chromium_build==1', {
'dependencies': [
'../chrome/app/policy/cloud_policy_codegen.gyp:cloud_policy_proto_compile',
'../chrome/browser/sync/protocol/sync_proto.gyp:sync_proto',
'../third_party/protobuf/protobuf.gyp:py_proto',
],
}],
['OS == "linux" or OS == "freebsd" or OS == "openbsd"', {
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
}, {
'dependencies': [
'../build/linux/system.gyp:nss',
],
}],
],
}],
['OS == "linux"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
],
},
{
'target_name': 'net_resources',
'type': 'none',
'msvs_guid': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/net',
},
'actions': [
{
'action_name': 'net_resources',
'variables': {
'grit_grd_file': 'base/net_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
'target_name': 'fetch_client',
'type': 'executable',
'dependencies': [
'net',
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
],
'msvs_guid': 'DABB8796-B9A2-4CD9-BF89-09B03E92B123',
'sources': [
'tools/fetch/fetch_client.cc',
],
},
{
'target_name': 'fetch_server',
'type': 'executable',
'dependencies': [
'net',
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
],
'msvs_guid': 'DABB8796-B9A2-4CD9-BF89-09B03E92B124',
'sources': [
'tools/fetch/fetch_server.cc',
'tools/fetch/http_listen_socket.cc',
'tools/fetch/http_listen_socket.h',
'tools/fetch/http_server.cc',
'tools/fetch/http_server.h',
'tools/fetch/http_server_request_info.cc',
'tools/fetch/http_server_request_info.h',
'tools/fetch/http_server_response_info.cc',
'tools/fetch/http_server_response_info.h',
'tools/fetch/http_session.cc',
'tools/fetch/http_session.h',
],
},
{
'target_name': 'http_server',
'type': '<(library)',
'dependencies': [
'net',
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
],
'msvs_guid': 'FCB894A4-CC6C-48C2-B495-52C80527E9BE',
'sources': [
'server/http_server.cc',
'server/http_server.h',
'server/http_server_request_info.cc',
'server/http_server_request_info.h',
],
},
{
'target_name': 'dnssec_chain_verify',
'type': 'executable',
'dependencies': [
'net_base',
],
'sources': [
'tools/dnssec_chain_verify/dnssec_chain_verify.cc',
]
},
{
'target_name': 'ssl_false_start_blacklist_process',
'type': 'executable',
'toolsets': ['host'],
'include_dirs': [
'..',
],
'sources': [
'base/ssl_false_start_blacklist_process.cc',
],
},
],
'conditions': [
['OS=="linux"', {
'targets': [
{
'target_name': 'flip_in_mem_edsm_server',
'type': 'executable',
'cflags': [
'-Wno-deprecated',
],
'dependencies': [
'../base/base.gyp:base',
'net.gyp:net',
'../third_party/openssl/openssl.gyp:openssl',
],
'sources': [
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/flip_server/acceptor_thread.h',
'tools/flip_server/acceptor_thread.cc',
'tools/flip_server/balsa_enums.h',
'tools/flip_server/balsa_frame.cc',
'tools/flip_server/balsa_frame.h',
'tools/flip_server/balsa_headers.cc',
'tools/flip_server/balsa_headers.h',
'tools/flip_server/balsa_headers_token_utils.cc',
'tools/flip_server/balsa_headers_token_utils.h',
'tools/flip_server/balsa_visitor_interface.h',
'tools/flip_server/buffer_interface.h',
'tools/flip_server/constants.h',
'tools/flip_server/create_listener.cc',
'tools/flip_server/create_listener.h',
'tools/flip_server/epoll_server.cc',
'tools/flip_server/epoll_server.h',
'tools/flip_server/flip_config.cc',
'tools/flip_server/flip_config.h',
'tools/flip_server/flip_in_mem_edsm_server.cc',
'tools/flip_server/http_interface.cc',
'tools/flip_server/http_interface.h',
'tools/flip_server/http_message_constants.cc',
'tools/flip_server/http_message_constants.h',
'tools/flip_server/loadtime_measurement.h',
'tools/flip_server/mem_cache.h',
'tools/flip_server/mem_cache.cc',
'tools/flip_server/porting.txt',
'tools/flip_server/output_ordering.cc',
'tools/flip_server/output_ordering.h',
'tools/flip_server/ring_buffer.cc',
'tools/flip_server/ring_buffer.h',
'tools/flip_server/simple_buffer.cc',
'tools/flip_server/simple_buffer.h',
'tools/flip_server/sm_connection.cc',
'tools/flip_server/sm_connection.h',
'tools/flip_server/sm_interface.h',
'tools/flip_server/split.h',
'tools/flip_server/split.cc',
'tools/flip_server/spdy_ssl.cc',
'tools/flip_server/spdy_ssl.h',
'tools/flip_server/spdy_interface.cc',
'tools/flip_server/spdy_interface.h',
'tools/flip_server/spdy_util.cc',
'tools/flip_server/spdy_util.h',
'tools/flip_server/streamer_interface.cc',
'tools/flip_server/streamer_interface.h',
'tools/flip_server/string_piece_utils.h',
'tools/flip_server/thread.h',
'tools/flip_server/url_to_filename_encoder.h',
'tools/flip_server/url_utilities.h',
],
},
]
}],
['OS=="win"', {
'targets': [
{
# TODO(port): dump_cache is still Windows-specific.
'target_name': 'dump_cache',
'type': 'executable',
'dependencies': [
'net',
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'tools/dump_cache/cache_dumper.cc',
'tools/dump_cache/cache_dumper.h',
'tools/dump_cache/dump_cache.cc',
'tools/dump_cache/dump_files.cc',
'tools/dump_cache/upgrade.cc',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
],
},
],
}],
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 37.314187
| 104
| 0.595764
|
de4ed9b012b731199a76ec689ab4e3afd2407790
| 9,964
|
py
|
Python
|
src/m4_functions_vs_methods.py
|
thuerje/02-ObjectsFunctionsAndMethods
|
f2eb90f0e344f12883d5b11a2d3242c22b10c100
|
[
"MIT"
] | null | null | null |
src/m4_functions_vs_methods.py
|
thuerje/02-ObjectsFunctionsAndMethods
|
f2eb90f0e344f12883d5b11a2d3242c22b10c100
|
[
"MIT"
] | null | null | null |
src/m4_functions_vs_methods.py
|
thuerje/02-ObjectsFunctionsAndMethods
|
f2eb90f0e344f12883d5b11a2d3242c22b10c100
|
[
"MIT"
] | null | null | null |
"""
Demonstrates using (calling) FUNCTIONS and using (calling) METHODS:
-- what is similar, and
-- how they differ.
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Jess Thuer.
""" # DONE PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
#
# DONE
# READ this comment, ASKING QUESTIONS as needed to understand it.
#
# Part 1: CONSTRUCTING objects, applying ** METHODS ** to them:
#
# For objects that are CONSTRUCTED, we use the DOT notation
# to ask them to do things or to change their characteristics.
# For example:
# nadia = rg.SimpleTurtle()
# nadia.pen = rg.Pen('blue', 1)
# nadia.forward(100)
#
# In the above example:
#
# pen is an INSTANCE VARIABLE (aka DATA ATTRIBUTE, FIELD)
# of SimpleTurtles. It is a CHARACTERISTIC of
# SimpleTurtles. Each SimpleTurtle has its own VALUE
# of that characteristic.
#
# forward is a METHOD (aka FUNCTION ATTRIBUTE)
# of SimpleTurtles. It is something that any
# SimpleTurtle can DO. It is just like a FUNCTION
# except for details about the way that it is called.
#
# The statement
# nadia.forward(100)
# CALLS the forward METHOD on nadia, sending that method the number 100
# and making the method run (execute).
#
# Part 2: Defining and calling ** FUNCTIONS **.
#
# We ** define ** FUNCTIONS to give NAMES to blocks of code
# (note FUNCTIONS and METHODS are different).
# For example:
#
# def turtle3():
# maja = rg.SimpleTurtle()
# maja.pen = rg.Pen('green', 10)
# maja.paint_bucket = rg.PaintBucket('black')
# ...
#
# We ** call ** FUNCTIONS (that is, we make them run)
# in a way that is similar to how we call METHODS,
# but WITHOUT the DOT notation. For example
# def main():
# turtle3()
#
# Run this module and review the existing code.
# Can you identify the function calls and the method calls?
#
# When you believe you understand the differences and similarities between
# calling a FUNCTION and calling a METHOD, change the above TO-DO to DONE.
#
###############################################################################
import rosegraphics as rg
def main():
"""
Makes a TurtleWindow,
calls the other functions in this module to test/demo them,
and waits for the user to click anywhere in the window to close it.
"""
window = rg.TurtleWindow()
jump_and_move_turtle(100, 50, 200, -100)
turtle = rg.SimpleTurtle('square')
draw_many_squares(turtle, 3, 75, 15)
    turtle3('blue', 5)
try_methods()
try_functions()
try_methods_and_functions()
###########################################################################
# When the TODOs ask you to test YOUR code, put YOUR tests below this:
###########################################################################
window.close_on_mouse_click() # Your code must be ABOVE this statement
def jump_and_move_turtle(x1, y1, x2, y2):
"""
Constructs a thick, slow, magenta SimpleTurtle.
Jumps that SimpleTurtle (without drawing) to (x1, y1),
then moves that Turtle (while drawing) to (x2, y2).
"""
# -------------------------------------------------------------------------
# Students:
# Do NOT touch this function - it has no TO-DO in it.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the other problems.
# -------------------------------------------------------------------------
jumper = rg.SimpleTurtle()
jumper.pen = rg.Pen('magenta', 20)
jumper.speed = 3
jumper.pen_up()
jumper.go_to(rg.Point(x1, y1))
jumper.pen_down()
jumper.go_to(rg.Point(x2, y2))
def draw_many_squares(my_turtle, number_of_squares, size, twist):
"""
Makes the given SimpleTurtle object draw:
-- many squares (how many? answer: NUMBER_OF_SQUARES)
where each square:
-- has the same size (what size? answer: SIZE)
and each square is:
-- "twisted" a bit from the previous one (how much? TWIST degrees)
NOTE: The 3 lines below that begin with :type are called
"type hints". They make the "dot" trick work more effectively.
We will include them in function specifications routinely.
:type my_turtle: rg.SimpleTurtle
:type number_of_squares: int
:type size: int
:type twist: int
"""
# -------------------------------------------------------------------------
# Students:
# Do NOT touch this function - it has no TO-DO in it.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the other problems.
# -------------------------------------------------------------------------
old_speed = my_turtle.speed
my_turtle.speed = 10
for _ in range(number_of_squares):
my_turtle.draw_square(size)
my_turtle.left(twist)
my_turtle.speed = old_speed
def turtle3(color, thickness):
"""
Constructs a classic SimpleTurtle and asks it to draw a
"ball on pole" shape, using a Pen with the give color and thickness.
"""
# -------------------------------------------------------------------------
# Students:
# Do NOT touch this function - it has no TO-DO in it.
# -------------------------------------------------------------------------
maja = rg.SimpleTurtle()
maja.pen = rg.Pen(color, thickness)
maja.paint_bucket = rg.PaintBucket('black')
maja.speed = 15
maja.right(135)
maja.forward(300)
maja.begin_fill()
maja.draw_circle(50)
maja.end_fill()
def try_methods():
"""
Constructs a SimpleTurtle and sets its pen to a new rg.Pen
that is 'brown' with thickness 5.
Then makes the SimpleTurtle move as follows (in the order listed):
-- forward 150 units
-- left 90 degrees
-- forward 50 units
-- backward 100 units
"""
st = rg.SimpleTurtle('turtle')
    st.pen = rg.Pen('brown', 5)
st.forward(150)
st.left(90)
st.forward(50)
st.backward(100)
###########################################################################
# DONE Implement and test this function, per its doc-string above.
# (To test it, put a statement in main that calls this function.)
###########################################################################
def try_functions():
# IMPORTANT: Read the NOTE below before you try to solve this TO-DO!
"""
Causes several SimpleTurtles to do the following:
-- One jumps to (200, 100), then moves (while drawing) to (300, 30)
-- One jumps to (100, 200), then moves (while drawing) to (0, 0)
-- One jumps to (-50, 50), then moves (while drawing) to (100, 100)
"""
jump_and_move_turtle(200, 100, 300, 30)
jump_and_move_turtle(100, 200, 0, 0)
jump_and_move_turtle(-50, 50, 100, 100)
###########################################################################
# DONE Implement and test this function, per its doc-string above.
# (To test it, put a statement in main that calls this function.)
#
# NOTE: This function requires
# ** exactly 3 lines **
# If you think it needs more, ** ASK FOR HELP. **
# HINT: see jump_and_move_turtle above.
#
###########################################################################
def try_methods_and_functions():
# IMPORTANT: Read the NOTE below before you try to solve this TO-DO!
"""
Constructs a SimpleTurtle and sets its pen to a new rg.Pen
that is 'blue' with thickness 5.
Then makes the SimpleTurtle do the following (in the order listed):
1. Go backward 150 units.
2. Change its speed to 1 (slowest).
Draw 2 squares whose size (width and height) are 100,
each "twisted" from the previous by 30 degrees.
3. Change its speed to 5 (faster).
Change its Pen's color to 'red'.
Draw 10 squares whose size (width and height) are 50,
each "twisted" from the previous by 15 degrees.
4. Change its speed to 100 (about the fastest possible).
Change its Pen's thickness to 35.
Draw 8 squares whose size (width and height) are 300,
each "twisted" from the previous by 60 degrees.
5. Changes its Pen to be a NEW Pen whose color is 'black'
and whose thickness is 3.
6. Goes backward 200 units.
7. Draw a CIRCLE whose radius is 30.
8. Draw a SQUARE whose sides are each of length 50.
"""
st2 = rg.SimpleTurtle('turtle')
    st2.pen = rg.Pen('blue', 5)
    st2.backward(150)
    st2.speed = 1
    draw_many_squares(st2, 2, 100, 30)
    st2.speed = 5
    st2.pen = rg.Pen('red', 5)
    draw_many_squares(st2, 10, 50, 15)
    st2.speed = 100
    st2.pen = rg.Pen('red', 35)  # keep the red color, thickness now 35
    draw_many_squares(st2, 8, 300, 60)
    st2.pen = rg.Pen('black', 3)
st2.backward(200)
st2.draw_circle(30)
st2.draw_square(50)
###########################################################################
# DONE Implement and test this function, per its doc-string above.
# (To test it, put a statement in main that calls this function.)
#
# NOTE: This function should ** CALL ** the
# draw_many_squares
# function defined above. If you don't see why, ** ASK FOR HELP. **
#
###########################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 34.597222
| 79
| 0.541851
|
c10358a10d3ef4464cb7a38a489cdfe2602f19e8
| 1,323
|
py
|
Python
|
os_client_config/defaults.py
|
mail2nsrajesh/os-client-config
|
30c8729f782c0c13ca872260085d5b7b7c37df61
|
[
"Apache-2.0"
] | null | null | null |
os_client_config/defaults.py
|
mail2nsrajesh/os-client-config
|
30c8729f782c0c13ca872260085d5b7b7c37df61
|
[
"Apache-2.0"
] | null | null | null |
os_client_config/defaults.py
|
mail2nsrajesh/os-client-config
|
30c8729f782c0c13ca872260085d5b7b7c37df61
|
[
"Apache-2.0"
] | 1
|
2020-07-21T02:18:23.000Z
|
2020-07-21T02:18:23.000Z
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
_json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'defaults.json')
_defaults = None
def get_defaults():
global _defaults
if not _defaults:
# Python language specific defaults
# These are defaults related to use of python libraries, they are
# not qualities of a cloud.
_defaults = dict(
api_timeout=None,
verify=True,
cacert=None,
cert=None,
key=None,
)
with open(_json_path, 'r') as json_file:
updates = json.load(json_file)
if updates is not None:
_defaults.update(updates)
return _defaults.copy()
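# Example (added sketch): because get_defaults() returns a copy, callers
# may customize the result without touching the cached module defaults:
#   d = get_defaults()
#   d['verify'] = False  # affects only this copy, not _defaults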
| 31.5
| 75
| 0.668178
|
dac9733af4075f4e8fc402e1233b9f075dffabd5
| 4,951
|
py
|
Python
|
size_tracker.py
|
TwidWawMon/Sims4SlowGrowthMod
|
dd50fb7a67666b71b4828325344551c7fcb1b0c8
|
[
"MIT"
] | null | null | null |
size_tracker.py
|
TwidWawMon/Sims4SlowGrowthMod
|
dd50fb7a67666b71b4828325344551c7fcb1b0c8
|
[
"MIT"
] | null | null | null |
size_tracker.py
|
TwidWawMon/Sims4SlowGrowthMod
|
dd50fb7a67666b71b4828325344551c7fcb1b0c8
|
[
"MIT"
] | null | null | null |
import services
from sims.sim_info_types import Gender
from util import get_sim_info
from sim_modifier import set_all_sims_normal, set_sim_normal, set_sizes
from logger import Logger
import json
import os
import random
import tempfile
save_file_path = os.path.join(tempfile.gettempdir(), "sims4.txt")
class SizeTracker:
def __init__(self):
# Keys: sim IDs (as integers)
# Values: array of [height, chest, growth, first_name]
self.sizes = {}
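        # e.g. {1234567: [-30.0, 2.5, 17.3, "Bella"]} -- a hypothetical
        # entry: height, chest, growth rate, first name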
def read_or_create_size_file(self):
Logger.log("Loading from disk")
if not os.path.exists(save_file_path):
Logger.log("File does not exist")
self.save_sizes()
else:
with open(save_file_path, "r") as f:
sizes_with_str_keys = json.loads(f.read())
self.sizes = {}
for k, v in sizes_with_str_keys.items():
self.sizes[int(k)] = v
Logger.log("Read %s size(s)" % len(self.sizes))
def save_sizes(self):
Logger.log("Saving sizes to disk")
with open(save_file_path, "wt") as f:
f.write(json.dumps(self.sizes))
def grow_sim(self, id: int):
sim_info = get_sim_info(id)
if sim_info.zone_id != services.current_zone_id(): return
if sim_info.gender == Gender.FEMALE and random.randint(0, 7) == 5:
old = self.sizes[id][2]
change = random.uniform(1, 3.5)
if random.randint(0, 10) == 5:
change += random.uniform(1, 7)
self.sizes[id][2] = min(100, old + change)
Logger.log("Auto-blessing %s: %s to %s" % (sim_info.first_name + " " + sim_info.last_name, old, self.sizes[id][2]))
start_sizes = self.sizes[id]
rate = start_sizes[2]
height_growth = random.uniform(0, rate / 166)
chest_growth = random.uniform(0, rate / 250)
if start_sizes[0] < 0:
height_growth *= 7
# If your height is negative, then don't let it grow beyond 0 in one jump.
height_growth = min(height_growth, abs(start_sizes[0]))
start_sizes[0] += height_growth
start_sizes[1] += chest_growth
self.sizes[id] = start_sizes
self.sizes[id][0] = round(self.sizes[id][0], 3)
self.sizes[id][1] = round(self.sizes[id][1], 3)
self.sizes[id][2] = round(self.sizes[id][2], 3)
set_sizes(id, start_sizes[0], start_sizes[1])
def adjust_sizes(self):
all_sims = list(services.sim_info_manager().get_all())
for sim in all_sims:
sim_info = sim.get_sim_info()
if sim_info.zone_id != services.current_zone_id(): continue
id = sim_info.id
if id not in self.sizes:
is_male = sim_info.gender == Gender.MALE
rate = 0 if is_male else random.uniform(1, 50)
start_height = 0 if is_male else -30
chest = random.uniform(0.0, 5.0)
self.sizes[id] = [start_height, chest, rate, sim_info.first_name]
set_sim_normal(sim_info)
Logger.log("Initialized %s" % self.get_size_string(id))
for id in self.sizes:
self.grow_sim(id)
def get_size_string(self, id: int):
if id not in self.sizes:
return "No size initialized for id==%s" % id
(height, chest, rate, first_name) = self.sizes[id]
return "%s: height==%s, chest==%s, growth==%s" % (first_name, round(height, 2), round(chest, 2), round(rate, 2))
def wipe(self):
self.sizes = {}
self.save_sizes()
Logger.log("Wiping all sizes")
set_all_sims_normal()
Logger.log("Wiped out sizes")
def bless(self, first_name: str, times: int):
times = min(times, 500)
        if first_name is None:
return
if len(self.sizes) == 0:
Logger.log("Sizes not loaded")
for k, v in self.sizes.items():
if v[3].lower() == first_name.lower():
Logger.log("Blessing %s %s time(s)" % (first_name, times))
for i in range(int(times)):
self.grow_sim(k)
    def get_sim_ids_by_first_name(self, first_name: str):
ids = []
for id, v in self.sizes.items():
if v[3].lower() == first_name.lower():
ids.append(id)
return ids
def chest(self, first_name: str, times: int):
times = min(times, 500)
        if first_name is None:
return
if len(self.sizes) == 0:
Logger.log("Sizes not loaded")
for id in self.get_sim_ids_by_first_name(first_name):
self.sizes[id][1] += int(times)
Logger.log("Blessing chest of %s to %s" % (first_name, self.sizes[id][1]))
set_sizes(id, self.sizes[id][0], self.sizes[id][1])
def bless_rate(self, first_name: str, times: int):
times = min(times, 500)
        if first_name is None:
return
if len(self.sizes) == 0:
Logger.log("Sizes not loaded")
for id in self.get_sim_ids_by_first_name(first_name):
self.sizes[id][2] *= int(times)
Logger.log("Blessing %s to %s" % (first_name, self.sizes[id][2]))
| 34.144828
| 125
| 0.606948
|
7f6f9f2731e7561061288ee716fc5d48f94330aa
| 2,082
|
py
|
Python
|
AbatementProject/InputDisplacement/py_local/COE_abate.py
|
ChampionApe/GamsPythonModels
|
aaa234b2627cda2b92e478e8e8503bf9778aebeb
|
[
"MIT"
] | null | null | null |
AbatementProject/InputDisplacement/py_local/COE_abate.py
|
ChampionApe/GamsPythonModels
|
aaa234b2627cda2b92e478e8e8503bf9778aebeb
|
[
"MIT"
] | null | null | null |
AbatementProject/InputDisplacement/py_local/COE_abate.py
|
ChampionApe/GamsPythonModels
|
aaa234b2627cda2b92e478e8e8503bf9778aebeb
|
[
"MIT"
] | null | null | null |
def equation(name,domains,conditions,LHS,RHS):
return f"""{name}{domains}{'$('+conditions+')' if conditions != '' else conditions}.. {LHS} =E= {RHS};"""
class V1:
def __init__(self,version='std',**kwargs):
self.version = version
def run(self,vartext,domains,conditions,name):
		if self.version == 'std':
out = self.unit_cost(f"E_uc_{name}", domains['uc'],conditions['uc'],
vartext['PwT'],vartext['qD'],vartext['cbar'],vartext['n'],vartext['k2t'])+'\n\t'
		elif self.version == 'Q2P':
out = self.unit_cost_Q2P(f"E_uc_{name}", domains['uc'],conditions['uc'],
vartext['PwT'],vartext['qD'],vartext['cbar'],vartext['n'],vartext['k2t'],vartext['q2p'])+'\n\t'
out += self.current_application(f"E_currapp_{name}", domains['currapp'], conditions['currapp'],
vartext['qD'],vartext['theta_c'],vartext['n'],vartext['u2c'],vartext['c2e'])+'\n\t'
out += self.potential_application(f"E_potapp_{name}", domains['potapp'], conditions['potapp'],
vartext['qD'],vartext['theta_p'],vartext['n'],vartext['c2e'])
return out
def unit_cost(self,e_name,domains,conditions,PwT,qD,cbar,n,k2t):
"""
Equation for calibration of unit cost of the technology.
"""
RHS = f"""{cbar['b']} * sum({n['a_aa']}$({k2t['b']}), {qD['a_aa']}) / {PwT['b']}"""
return equation(e_name,domains,conditions,qD['b'],RHS)
def unit_cost_Q2P(self,e_name,domains,conditions,PwT,qD,cbar,n,k2t,q2p):
RHS = f"""{cbar['b']} * sum({n['a_aa']}$({k2t['b']}), {qD['a_aa']}) / sum({n['a_aaa']}$({q2p['aa_aaa']}),{PwT['a_aaa']})"""
return equation(e_name,domains,conditions,qD['b'],RHS)
def current_application(self,e_name,domains,conditions,qD,theta_c,n,u2c,c2e):
"""
The share u/E = theta_c.
"""
RHS = f"""{theta_c['b']} * sum({n['a_aa']}$({u2c['b']}), sum({n['a_aaa']}$({c2e['a_aa.aa_aaa']}), {qD['a_aaa']}))"""
return equation(e_name,domains,conditions,qD['b'],RHS)
def potential_application(self,e_name,domains,conditions,qD,theta_p,n,c2e):
RHS = f"""{theta_p['b']} * sum({n['a_aa']}$({c2e['b']}), {qD['a_aa']})"""
return equation(e_name,domains,conditions,qD['b'],RHS)
| 47.318182
| 125
| 0.637848
|
f9401bd2ca3c217db416046e663f3874691af001
| 387
|
py
|
Python
|
python/cursoemvideo-python/03-mundo-3/listas/lista 1/dividindo valores em varias listas.py
|
Alex4gtx/estudos
|
cf5908c543be1b112157b1c95f3d987484ff2505
|
[
"MIT"
] | null | null | null |
python/cursoemvideo-python/03-mundo-3/listas/lista 1/dividindo valores em varias listas.py
|
Alex4gtx/estudos
|
cf5908c543be1b112157b1c95f3d987484ff2505
|
[
"MIT"
] | null | null | null |
python/cursoemvideo-python/03-mundo-3/listas/lista 1/dividindo valores em varias listas.py
|
Alex4gtx/estudos
|
cf5908c543be1b112157b1c95f3d987484ff2505
|
[
"MIT"
] | null | null | null |
lista = []
par = []
impar = []
while True:
    lista.append(int(input('Enter a number: ')))
    q = str(input('Do you want to continue? [Y/N] ')).strip().upper()[0]
    if q == 'N':
break
for v in lista:
if v % 2 == 0:
par.append(v)
else:
impar.append(v)
print(f'{30*"-="}\nA lista completa é {lista}\nA lista de pares é {par}\nA lista de impares é {impar}')
| 25.8
| 103
| 0.555556
|
5fdb63703a26a68c2bc05c2a472de67672919b78
| 7,935
|
py
|
Python
|
docs/source/conf.py
|
emilianobonassi/lnav
|
37f46af7a691192fba4a889e4e4da6cbfcaac8e4
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/conf.py
|
emilianobonassi/lnav
|
37f46af7a691192fba4a889e4e4da6cbfcaac8e4
|
[
"BSD-2-Clause"
] | 3
|
2015-09-30T22:25:05.000Z
|
2015-10-01T00:05:46.000Z
|
docs/source/conf.py
|
emilianobonassi/lnav
|
37f46af7a691192fba4a889e4e4da6cbfcaac8e4
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# lnav documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 12 21:09:39 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
this_dir = os.path.abspath('.')
src_dir = os.path.join(this_dir, "..", "..", "src")
sys.path.insert(0, src_dir)
import format2csv
format2csv.main(["",
os.path.join(src_dir, "default-log-formats.json"),
os.path.join(this_dir, "format-table.csv")])
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lnav'
copyright = u'2018, Tim Stack'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lnavdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'lnav.tex', u'lnav Documentation',
u'Tim Stack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'lnav', u'lnav Documentation',
[u'Tim Stack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'lnav', u'lnav Documentation',
u'Tim Stack', 'lnav', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.613546
| 80
| 0.708759
|
1ba4166ce7f2332f3a69ca756be036afcf2dd108
| 1,490
|
py
|
Python
|
HCBASimWeb/user/forms.py
|
pribond/HCBASimWeb
|
0ed9d3ae5b7b2aad7d5410740fe3d0d5f6114331
|
[
"BSD-3-Clause"
] | 10
|
2016-11-28T05:01:24.000Z
|
2020-04-04T17:04:11.000Z
|
HCBASimWeb/user/forms.py
|
pribond/HCBASimWeb
|
0ed9d3ae5b7b2aad7d5410740fe3d0d5f6114331
|
[
"BSD-3-Clause"
] | 349
|
2017-03-25T23:44:47.000Z
|
2021-03-20T03:46:39.000Z
|
HCBASimWeb/user/forms.py
|
pribond/HCBASimWeb
|
0ed9d3ae5b7b2aad7d5410740fe3d0d5f6114331
|
[
"BSD-3-Clause"
] | 2
|
2016-12-02T15:34:40.000Z
|
2017-03-21T13:51:39.000Z
|
# -*- coding: utf-8 -*-
"""User forms."""
from flask_wtf import Form
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
"""Register form."""
username = StringField('Username',
validators=[DataRequired(), Length(min=3, max=25)])
email = StringField('Email',
validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password',
validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Verify password',
[DataRequired(), EqualTo('password', message='Passwords must match')])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(RegisterForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(RegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(username=self.username.data).first()
if user:
self.username.errors.append('Username already registered')
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append('Email already registered')
return False
return True
| 36.341463
| 98
| 0.607383
|
5bfdfe16deded11a66a1509b81a8e0583e6797be
| 5,150
|
py
|
Python
|
dynamic_programming/knapsack.py
|
Jacksole/Python
|
8d8b403aa621e510da808cda38642a3c9da1d28d
|
[
"MIT"
] | null | null | null |
dynamic_programming/knapsack.py
|
Jacksole/Python
|
8d8b403aa621e510da808cda38642a3c9da1d28d
|
[
"MIT"
] | 1
|
2020-07-28T16:58:18.000Z
|
2020-07-28T16:58:18.000Z
|
dynamic_programming/knapsack.py
|
Jacksole/Python
|
8d8b403aa621e510da808cda38642a3c9da1d28d
|
[
"MIT"
] | null | null | null |
"""
Given weights and values of n items, put these items in a knapsack of
capacity W to get the maximum total value in the knapsack.
Note that only the integer weights 0-1 knapsack problem is solvable
using dynamic programming.
"""
def MF_knapsack(i, wt, val, j):
"""
This code involves the concept of memory functions. Here we solve the subproblems which are needed
unlike the below example
F is a 2D array with -1s filled up
"""
global F # a global dp table for knapsack
if F[i][j] < 0:
if j < wt[i - 1]:
val = MF_knapsack(i - 1, wt, val, j)
else:
val = max(
MF_knapsack(i - 1, wt, val, j),
MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
)
F[i][j] = val
return F[i][j]
def knapsack(W, wt, val, n):
dp = [[0 for i in range(W + 1)] for j in range(n + 1)]
for i in range(1, n + 1):
for w in range(1, W + 1):
if wt[i - 1] <= w:
                dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]],
                               dp[i - 1][w])
else:
dp[i][w] = dp[i - 1][w]
return dp[n][W], dp
def knapsack_with_example_solution(W: int, wt: list, val: list):
"""
    Solves the integer weights knapsack problem and returns one of
    the several possible optimal subsets.
Parameters
---------
W: int, the total maximum weight for the given knapsack problem.
wt: list, the vector of weights for all items where wt[i] is the weight
of the ith item.
val: list, the vector of values for all items where val[i] is the value
    of the ith item
Returns
-------
optimal_val: float, the optimal value for the given knapsack problem
example_optional_set: set, the indices of one of the optimal subsets
which gave rise to the optimal value.
Examples
-------
>>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22])
(142, {2, 3, 4})
>>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
(8, {3, 4})
>>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4])
Traceback (most recent call last):
...
ValueError: The number of weights must be the same as the number of values.
But got 4 weights and 3 values
"""
if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples"
)
num_items = len(wt)
if num_items != len(val):
raise ValueError(
"The number of weights must be the "
"same as the number of values.\nBut "
"got {} weights and {} values".format(num_items, len(val))
)
for i in range(num_items):
if not isinstance(wt[i], int):
raise TypeError(
"All weights must be integers but "
"got weight of type {} at index {}".format(type(wt[i]), i)
)
optimal_val, dp_table = knapsack(W, wt, val, num_items)
example_optional_set = set()
_construct_solution(dp_table, wt, num_items, W, example_optional_set)
return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
"""
Recursively reconstructs one of the optimal subsets given
a filled DP table and the vector of weights
Parameters
---------
dp: list of list, the table of a solved integer weight dynamic programming problem
wt: list or tuple, the vector of weights of the items
i: int, the index of the item under consideration
j: int, the current possible maximum weight
optimal_set: set, the optimal subset so far. This gets modified by the function.
Returns
-------
None
"""
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(dp, wt, i - 1, j, optimal_set)
else:
optimal_set.add(i)
_construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
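# Worked trace (added illustration, using the test case from the __main__
# block below: W=6, wt=[4, 3, 2, 3], val=[3, 2, 4, 4]): dp[4][6] == 8 while
# dp[3][6] == 7, so item 4 is taken and the walk continues at (3, 6 - 3);
# there dp[3][3] == 4 while dp[2][3] == 2, so item 3 is taken; at (2, 1)
# nothing more fits, giving the optimal subset {3, 4}.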
if __name__ == "__main__":
"""
Adding test case for knapsack
"""
val = [3, 2, 4, 4]
wt = [4, 3, 2, 3]
n = 4
w = 6
F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)]
for j in range(n + 1)]
optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(MF_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
optimal_solution, optimal_subset = knapsack_with_example_solution(
w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 35.763889
| 102
| 0.594175
|
16bc9f8f261b53958b885666eca3c9ac04f9daff
| 3,854
|
py
|
Python
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_bdmenu02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 36
|
2019-11-08T20:50:07.000Z
|
2022-03-23T05:43:55.000Z
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_bdmenu02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 1
|
2019-11-08T21:26:16.000Z
|
2019-11-08T21:26:16.000Z
|
Raws-Maji/Lapis ReLiGHTs [BD]/lapis_bdmenu02.py
|
Ichunjo/encode-script
|
389a9f497e637eaade6f99acee816636856961d4
|
[
"MIT"
] | 7
|
2019-11-08T21:10:47.000Z
|
2022-03-28T21:57:04.000Z
|
"""Lapis script"""
__author__ = 'Vardë'
import os
import sys
import shlex
import subprocess
from typing import NamedTuple, Optional, Dict, Any
from pathlib import Path
from functools import partial
from regress import Regress, ReconstructMulti
import debandshit as dbs
import vardefunc as vdf
import atomchtools as atf
import G41Fun as gf
from vsutil import depth, split, join
import lvsfunc as lvf
import vapoursynth as vs
core = vs.core
class InfosBD(NamedTuple):
path: str
src: str
src_clip: vs.VideoNode
frame_start: int
frame_end: int
src_cut: vs.VideoNode
a_src: str
a_src_cut: str
a_enc_cut: str
name: str
output: str
chapter: str
output_final: str
def infos_bd(path, frame_start, frame_end) -> InfosBD:
src = path + '.m2ts'
src_clip = lvf.src(src, stream_index=0, ff_loglevel=4)
src_cut = src_clip[frame_start:frame_end] if (frame_start or frame_end) else src_clip
a_src = path + '_track_{}.wav'
a_src_cut = path + '_cut_track_{}.wav'
a_enc_cut = path + '_track_{}.m4a'
name = Path(sys.argv[0]).stem
output = name + '.265'
chapter = 'chapters/' + name + '.txt'
output_final = name + '.mkv'
return InfosBD(path, src, src_clip, frame_start, frame_end,
src_cut, a_src, a_src_cut, a_enc_cut,
name, output, chapter, output_final)
JPBD = infos_bd(r'[BDMV][201125][GNXA-2292][Lapis_Re_LiGHTs][vol.2]\LAPIS_RE_LIGHTS_2\BDMV\STREAM\00011', None, None)
def do_filter():
"""Vapoursynth filtering"""
src = JPBD.src_cut
src = depth(src, 32)
out = src
full_range = core.resize.Bicubic(out, range_in=0, range=1, dither_type='error_diffusion')
out = full_range
radius = 3
y, u, v = split(out)
y_m = core.resize.Point(y, 960, 1080, src_left=-1)
y_m = core.resize.Bicubic(y_m, 960, 540)
def demangle(clip):
return vdf.nnedi3_upscale(clip, core.resize.Bicubic, True, pscrn=0)
y_m, u, v = map(demangle, (y_m, u, v))
y_fixup = core.std.MakeDiff(y, y_m)
yu, yv = Regress(y_m, u, v, radius=radius)
u_fixup = ReconstructMulti(y_fixup, yu, radius=radius)
u_r = core.std.MergeDiff(u, u_fixup)
v_fixup = ReconstructMulti(y_fixup, yv, radius=radius)
v_r = core.std.MergeDiff(v, v_fixup)
out = join([y, u_r, v_r])
out = depth(out, 16)
dehalo = gf.MaskedDHA(out, rx=1.25, ry=1.25, darkstr=0.10, brightstr=1.0, maskpull=46, maskpush=148)
out = dehalo
upscale = atf.eedi3Scale(out, 2160, pscrn=0)
out = upscale
dehalo = gf.MaskedDHA(out, rx=1.15, ry=1.15, darkstr=0.10, brightstr=1.0, maskpull=46, maskpush=148)
out = dehalo
deband_mask = lvf.denoise.detail_mask(out, brz_a=2000, brz_b=1000)
deband = dbs.f3kpf(out, 28, 48, 48)
deband = core.std.MaskedMerge(deband, out, deband_mask)
out = deband
grain = core.grain.Add(out, 1)
out = grain
return out.std.AssumeFPS(fpsnum=1, fpsden=1)[:1]
def do_encode(clip: vs.VideoNode) -> None:
"""Compression with x26X"""
if not os.path.isfile(JPBD.output):
print('\n\n\nVideo encoding')
ffv1_args = [
'ffmpeg', '-i', '-', '-vcodec', 'ffv1', '-coder', '1', '-context', '0',
'-g', '1', '-level', '3', '-threads', '8',
'-slices', '24', '-slicecrc', '1', '-color_range', 'pc', JPBD.name + "_lossless.mkv"
]
print("Encoder command: ", " ".join(ffv1_args), "\n")
process = subprocess.Popen(ffv1_args, stdin=subprocess.PIPE)
clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
process.communicate()
if __name__ == '__main__':
FILTERED = do_filter()
do_encode(FILTERED)
| 27.140845
| 117
| 0.639076
|
0d970784b8cbe815033b6d75e9b1805c1bf769c1
| 514
|
py
|
Python
|
plotly/validators/scattercarpet/marker/colorbar/tickformatstop/_name.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scattercarpet/marker/colorbar/tickformatstop/_name.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scattercarpet/marker/colorbar/tickformatstop/_name.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='name',
parent_name='scattercarpet.marker.colorbar.tickformatstop',
**kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 27.052632
| 67
| 0.620623
|
cbc88eb802fd54879e1f116a727db75fbcc43da0
| 3,460
|
py
|
Python
|
awacs/clouddirectory.py
|
alanjjenkins/awacs
|
0065e1833eae6a6070edb4ab4f180fd10b26c19a
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/clouddirectory.py
|
alanjjenkins/awacs
|
0065e1833eae6a6070edb4ab4f180fd10b26c19a
|
[
"BSD-2-Clause"
] | null | null | null |
awacs/clouddirectory.py
|
alanjjenkins/awacs
|
0065e1833eae6a6070edb4ab4f180fd10b26c19a
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Cloud Directory"
prefix = "clouddirectory"
class Action(BaseAction):
    def __init__(self, action: Optional[str] = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AddFacetToObject = Action("AddFacetToObject")
ApplySchema = Action("ApplySchema")
AttachObject = Action("AttachObject")
AttachPolicy = Action("AttachPolicy")
AttachToIndex = Action("AttachToIndex")
AttachTypedLink = Action("AttachTypedLink")
BatchRead = Action("BatchRead")
BatchWrite = Action("BatchWrite")
CreateDirectory = Action("CreateDirectory")
CreateFacet = Action("CreateFacet")
CreateIndex = Action("CreateIndex")
CreateObject = Action("CreateObject")
CreateSchema = Action("CreateSchema")
CreateTypedLinkFacet = Action("CreateTypedLinkFacet")
DeleteDirectory = Action("DeleteDirectory")
DeleteFacet = Action("DeleteFacet")
DeleteObject = Action("DeleteObject")
DeleteSchema = Action("DeleteSchema")
DeleteTypedLinkFacet = Action("DeleteTypedLinkFacet")
DetachFromIndex = Action("DetachFromIndex")
DetachObject = Action("DetachObject")
DetachPolicy = Action("DetachPolicy")
DetachTypedLink = Action("DetachTypedLink")
DisableDirectory = Action("DisableDirectory")
EnableDirectory = Action("EnableDirectory")
GetDirectory = Action("GetDirectory")
GetFacet = Action("GetFacet")
GetLinkAttributes = Action("GetLinkAttributes")
GetObjectAttributes = Action("GetObjectAttributes")
GetObjectInformation = Action("GetObjectInformation")
GetSchemaAsJson = Action("GetSchemaAsJson")
GetTypedLinkFacetInformation = Action("GetTypedLinkFacetInformation")
ListAppliedSchemaArns = Action("ListAppliedSchemaArns")
ListAttachedIndices = Action("ListAttachedIndices")
ListDevelopmentSchemaArns = Action("ListDevelopmentSchemaArns")
ListDirectories = Action("ListDirectories")
ListFacetAttributes = Action("ListFacetAttributes")
ListFacetNames = Action("ListFacetNames")
ListIncomingTypedLinks = Action("ListIncomingTypedLinks")
ListIndex = Action("ListIndex")
ListObjectAttributes = Action("ListObjectAttributes")
ListObjectChildren = Action("ListObjectChildren")
ListObjectParentPaths = Action("ListObjectParentPaths")
ListObjectParents = Action("ListObjectParents")
ListObjectPolicies = Action("ListObjectPolicies")
ListOutgoingTypedLinks = Action("ListOutgoingTypedLinks")
ListPolicyAttachments = Action("ListPolicyAttachments")
ListPublishedSchemaArns = Action("ListPublishedSchemaArns")
ListTagsForResource = Action("ListTagsForResource")
ListTypedLinkFacetAttributes = Action("ListTypedLinkFacetAttributes")
ListTypedLinkFacetNames = Action("ListTypedLinkFacetNames")
LookupPolicy = Action("LookupPolicy")
PublishSchema = Action("PublishSchema")
PutSchemaFromJson = Action("PutSchemaFromJson")
RemoveFacetFromObject = Action("RemoveFacetFromObject")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateFacet = Action("UpdateFacet")
UpdateLinkAttributes = Action("UpdateLinkAttributes")
UpdateObjectAttributes = Action("UpdateObjectAttributes")
UpdateSchema = Action("UpdateSchema")
UpdateTypedLinkFacet = Action("UpdateTypedLinkFacet")
| 39.770115
| 88
| 0.799133
|
d2c5f8ef7256909f34f345eae686051263dcbd98
| 26,463
|
py
|
Python
|
Lib/site-packages/notebook/services/kernels/handlers.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/notebook/services/kernels/handlers.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/notebook/services/kernels/handlers.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
"""Tornado handlers for kernels.
Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-16%3A-Notebook-multi-directory-dashboard-and-URL-mapping#kernels-api
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import logging
from textwrap import dedent
from tornado import gen, web
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from jupyter_client import protocol_version as client_protocol_version
try:
from jupyter_client.jsonutil import json_default
except ImportError:
from jupyter_client.jsonutil import (
date_default as json_default
)
from ipython_genutils.py3compat import cast_unicode
from notebook.utils import maybe_future, url_path_join, url_escape
from ...base.handlers import APIHandler
from ...base.zmqhandlers import AuthenticatedZMQStreamHandler, deserialize_binary_message
class MainKernelHandler(APIHandler):
@web.authenticated
@gen.coroutine
def get(self):
km = self.kernel_manager
kernels = yield maybe_future(km.list_kernels())
self.finish(json.dumps(kernels, default=json_default))
@web.authenticated
@gen.coroutine
def post(self):
km = self.kernel_manager
model = self.get_json_body()
if model is None:
model = {
'name': km.default_kernel_name
}
else:
model.setdefault('name', km.default_kernel_name)
kernel_id = yield maybe_future(km.start_kernel(kernel_name=model['name']))
model = yield maybe_future(km.kernel_model(kernel_id))
location = url_path_join(self.base_url, 'api', 'kernels', url_escape(kernel_id))
self.set_header('Location', location)
self.set_status(201)
self.finish(json.dumps(model, default=json_default))
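# A minimal client-side sketch of starting a kernel through the handler above
# (hypothetical host and token; assumes a running notebook server):
#   import requests
#   r = requests.post('http://localhost:8888/api/kernels',
#                     headers={'Authorization': 'token <your-token>'},
#                     json={'name': 'python3'})
#   assert r.status_code == 201
#   kernel_id = r.json()['id']  # the Location header also points at the new kernel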
class KernelHandler(APIHandler):
@web.authenticated
@gen.coroutine
def get(self, kernel_id):
km = self.kernel_manager
model = yield maybe_future(km.kernel_model(kernel_id))
self.finish(json.dumps(model, default=json_default))
@web.authenticated
@gen.coroutine
def delete(self, kernel_id):
km = self.kernel_manager
yield maybe_future(km.shutdown_kernel(kernel_id))
self.set_status(204)
self.finish()
class KernelActionHandler(APIHandler):
@web.authenticated
@gen.coroutine
def post(self, kernel_id, action):
km = self.kernel_manager
if action == 'interrupt':
yield maybe_future(km.interrupt_kernel(kernel_id))
self.set_status(204)
if action == 'restart':
try:
yield maybe_future(km.restart_kernel(kernel_id))
except Exception as e:
self.log.error("Exception restarting kernel", exc_info=True)
self.set_status(500)
else:
model = yield maybe_future(km.kernel_model(kernel_id))
self.write(json.dumps(model, default=json_default))
self.finish()
class ZMQChannelsHandler(AuthenticatedZMQStreamHandler):
'''There is one ZMQChannelsHandler per running kernel and it oversees all
the sessions.
'''
# class-level registry of open sessions
# allows checking for conflict on session-id,
# which is used as a zmq identity and must be unique.
_open_sessions = {}
@property
def kernel_info_timeout(self):
km_default = self.kernel_manager.kernel_info_timeout
return self.settings.get('kernel_info_timeout', km_default)
@property
def iopub_msg_rate_limit(self):
return self.settings.get('iopub_msg_rate_limit', 0)
@property
def iopub_data_rate_limit(self):
return self.settings.get('iopub_data_rate_limit', 0)
@property
def rate_limit_window(self):
return self.settings.get('rate_limit_window', 1.0)
def __repr__(self):
return f"{self.__class__.__name__}({getattr(self, 'kernel_id', 'uninitialized')})"
def create_stream(self):
km = self.kernel_manager
identity = self.session.bsession
for channel in ("iopub", "shell", "control", "stdin"):
meth = getattr(km, "connect_" + channel)
self.channels[channel] = stream = meth(self.kernel_id, identity=identity)
stream.channel = channel
def nudge(self):
"""Nudge the zmq connections with kernel_info_requests
Returns a Future that will resolve when we have received
a shell or control reply and at least one iopub message,
ensuring that zmq subscriptions are established,
sockets are fully connected, and kernel is responsive.
Keeps retrying kernel_info_request until these are both received.
"""
kernel = self.kernel_manager.get_kernel(self.kernel_id)
# Do not nudge busy kernels as kernel info requests sent to shell are
# queued behind execution requests.
# nudging in this case would cause a potentially very long wait
# before connections are opened,
# plus it is *very* unlikely that a busy kernel will not finish
# establishing its zmq subscriptions before processing the next request.
        if getattr(kernel, "execution_state", None) == "busy":
self.log.debug("Nudge: not nudging busy kernel %s", self.kernel_id)
f = Future()
f.set_result(None)
return f
# Use a transient shell channel to prevent leaking
# shell responses to the front-end.
shell_channel = kernel.connect_shell()
# Use a transient control channel to prevent leaking
# control responses to the front-end.
control_channel = kernel.connect_control()
# The IOPub used by the client, whose subscriptions we are verifying.
iopub_channel = self.channels["iopub"]
info_future = Future()
iopub_future = Future()
both_done = gen.multi([info_future, iopub_future])
def finish(f=None):
"""Ensure all futures are resolved
which in turn triggers cleanup
"""
for f in (info_future, iopub_future):
if not f.done():
f.set_result(None)
def cleanup(f=None):
"""Common cleanup"""
loop.remove_timeout(nudge_handle)
iopub_channel.stop_on_recv()
if not shell_channel.closed():
shell_channel.close()
if not control_channel.closed():
control_channel.close()
# trigger cleanup when both message futures are resolved
both_done.add_done_callback(cleanup)
def on_shell_reply(msg):
self.log.debug("Nudge: shell info reply received: %s", self.kernel_id)
if not info_future.done():
self.log.debug("Nudge: resolving shell future: %s", self.kernel_id)
info_future.set_result(None)
def on_control_reply(msg):
self.log.debug("Nudge: control info reply received: %s", self.kernel_id)
if not info_future.done():
self.log.debug("Nudge: resolving control future: %s", self.kernel_id)
info_future.set_result(None)
def on_iopub(msg):
self.log.debug("Nudge: IOPub received: %s", self.kernel_id)
if not iopub_future.done():
iopub_channel.stop_on_recv()
self.log.debug("Nudge: resolving iopub future: %s", self.kernel_id)
iopub_future.set_result(None)
iopub_channel.on_recv(on_iopub)
shell_channel.on_recv(on_shell_reply)
control_channel.on_recv(on_control_reply)
loop = IOLoop.current()
# Nudge the kernel with kernel info requests until we get an IOPub message
def nudge(count):
count += 1
# NOTE: this close check appears to never be True during on_open,
# even when the peer has closed the connection
if self.ws_connection is None or self.ws_connection.is_closing():
self.log.debug(
"Nudge: cancelling on closed websocket: %s", self.kernel_id
)
finish()
return
# check for stopped kernel
if self.kernel_id not in self.kernel_manager:
self.log.debug(
"Nudge: cancelling on stopped kernel: %s", self.kernel_id
)
finish()
return
# check for closed zmq socket
if shell_channel.closed():
self.log.debug("Nudge: cancelling on closed zmq socket: %s", self.kernel_id)
finish()
return
# check for closed zmq socket
if control_channel.closed():
self.log.debug(
"Nudge: cancelling on closed zmq socket: %s", self.kernel_id
)
finish()
return
if not both_done.done():
log = self.log.warning if count % 10 == 0 else self.log.debug
log(f"Nudge: attempt {count} on kernel {self.kernel_id}")
self.session.send(shell_channel, "kernel_info_request")
self.session.send(control_channel, "kernel_info_request")
nonlocal nudge_handle
nudge_handle = loop.call_later(0.5, nudge, count)
nudge_handle = loop.call_later(0, nudge, count=0)
# resolve with a timeout if we get no response
future = gen.with_timeout(loop.time() + self.kernel_info_timeout, both_done)
# ensure we have no dangling resources or unresolved Futures in case of timeout
future.add_done_callback(finish)
return future
def request_kernel_info(self):
"""send a request for kernel_info"""
km = self.kernel_manager
kernel = km.get_kernel(self.kernel_id)
try:
# check for previous request
future = kernel._kernel_info_future
except AttributeError:
self.log.debug("Requesting kernel info from %s", self.kernel_id)
# Create a kernel_info channel to query the kernel protocol version.
# This channel will be closed after the kernel_info reply is received.
if self.kernel_info_channel is None:
self.kernel_info_channel = km.connect_shell(self.kernel_id)
self.kernel_info_channel.on_recv(self._handle_kernel_info_reply)
self.session.send(self.kernel_info_channel, "kernel_info_request")
# store the future on the kernel, so only one request is sent
kernel._kernel_info_future = self._kernel_info_future
else:
if not future.done():
self.log.debug("Waiting for pending kernel_info request")
future.add_done_callback(lambda f: self._finish_kernel_info(f.result()))
return self._kernel_info_future
def _handle_kernel_info_reply(self, msg):
"""process the kernel_info_reply
enabling msg spec adaptation, if necessary
"""
        idents, msg = self.session.feed_identities(msg)
        try:
            msg = self.session.deserialize(msg)
        except Exception:
self.log.error("Bad kernel_info reply", exc_info=True)
self._kernel_info_future.set_result({})
return
else:
info = msg['content']
self.log.debug("Received kernel info: %s", info)
if msg['msg_type'] != 'kernel_info_reply' or 'protocol_version' not in info:
self.log.error("Kernel info request failed, assuming current %s", info)
info = {}
self._finish_kernel_info(info)
# close the kernel_info channel, we don't need it anymore
if self.kernel_info_channel:
self.kernel_info_channel.close()
self.kernel_info_channel = None
def _finish_kernel_info(self, info):
"""Finish handling kernel_info reply
Set up protocol adaptation, if needed,
and signal that connection can continue.
"""
protocol_version = info.get('protocol_version', client_protocol_version)
if protocol_version != client_protocol_version:
self.session.adapt_version = int(protocol_version.split('.')[0])
self.log.info(f"Adapting from protocol version {protocol_version} (kernel {self.kernel_id}) to {client_protocol_version} (client).")
if not self._kernel_info_future.done():
self._kernel_info_future.set_result(info)
def initialize(self):
super().initialize()
self.zmq_stream = None
self.channels = {}
self.kernel_id = None
self.kernel_info_channel = None
self._kernel_info_future = Future()
self._close_future = Future()
self.session_key = ''
# Rate limiting code
self._iopub_window_msg_count = 0
self._iopub_window_byte_count = 0
self._iopub_msgs_exceeded = False
self._iopub_data_exceeded = False
# Queue of (time stamp, byte count)
# Allows you to specify that the byte count should be lowered
# by a delta amount at some point in the future.
self._iopub_window_byte_queue = []
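        # Illustrative example (hypothetical numbers): with rate_limit_window=1.0,
        # two stream messages of 10 and 20 bytes arriving at loop time t=5.0 are
        # queued as [(6.0, 10), (6.0, 20)]; once the loop time passes 6.0, both
        # byte counts (and the message count) are subtracted back out in _on_zmq_reply.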
@gen.coroutine
def pre_get(self):
# authenticate first
super().pre_get()
# check session collision:
yield self._register_session()
# then request kernel info, waiting up to a certain time before giving up.
# We don't want to wait forever, because browsers don't take it well when
# servers never respond to websocket connection requests.
kernel = self.kernel_manager.get_kernel(self.kernel_id)
self.session.key = kernel.session.key
future = self.request_kernel_info()
def give_up():
"""Don't wait forever for the kernel to reply"""
if future.done():
return
self.log.warning("Timeout waiting for kernel_info reply from %s", self.kernel_id)
future.set_result({})
loop = IOLoop.current()
loop.add_timeout(loop.time() + self.kernel_info_timeout, give_up)
# actually wait for it
yield future
@gen.coroutine
def get(self, kernel_id):
self.kernel_id = cast_unicode(kernel_id, 'ascii')
yield super().get(kernel_id=kernel_id)
@gen.coroutine
def _register_session(self):
"""Ensure we aren't creating a duplicate session.
If a previous identical session is still open, close it to avoid collisions.
This is likely due to a client reconnecting from a lost network connection,
where the socket on our side has not been cleaned up yet.
"""
self.session_key = f'{self.kernel_id}:{self.session.session}'
stale_handler = self._open_sessions.get(self.session_key)
if stale_handler:
self.log.warning("Replacing stale connection: %s", self.session_key)
yield stale_handler.close()
if self.kernel_id in self.kernel_manager: # only update open sessions if kernel is actively managed
self._open_sessions[self.session_key] = self
def open(self, kernel_id):
super().open()
km = self.kernel_manager
km.notify_connect(kernel_id)
# on new connections, flush the message buffer
buffer_info = km.get_buffer(kernel_id, self.session_key)
if buffer_info and buffer_info['session_key'] == self.session_key:
self.log.info("Restoring connection for %s", self.session_key)
self.channels = buffer_info['channels']
connected = self.nudge()
def replay(value):
replay_buffer = buffer_info['buffer']
if replay_buffer:
self.log.info("Replaying %s buffered messages", len(replay_buffer))
for channel, msg_list in replay_buffer:
stream = self.channels[channel]
self._on_zmq_reply(stream, msg_list)
connected.add_done_callback(replay)
else:
try:
self.create_stream()
connected = self.nudge()
except web.HTTPError as e:
self.log.error("Error opening stream: %s", e)
                # WebSockets don't respond to traditional error codes, so we
                # close the connection.
for channel, stream in self.channels.items():
if not stream.closed():
stream.close()
self.close()
return
km.add_restart_callback(self.kernel_id, self.on_kernel_restarted)
km.add_restart_callback(self.kernel_id, self.on_restart_failed, 'dead')
def subscribe(value):
for channel, stream in self.channels.items():
stream.on_recv_stream(self._on_zmq_reply)
connected.add_done_callback(subscribe)
return connected
def on_message(self, msg):
if not self.channels:
# already closed, ignore the message
self.log.debug("Received message on closed websocket %r", msg)
return
if isinstance(msg, bytes):
msg = deserialize_binary_message(msg)
else:
msg = json.loads(msg)
channel = msg.pop('channel', None)
if channel is None:
self.log.warning("No channel specified, assuming shell: %s", msg)
channel = 'shell'
if channel not in self.channels:
self.log.warning("No such channel: %r", channel)
return
am = self.kernel_manager.allowed_message_types
mt = msg['header']['msg_type']
if am and mt not in am:
self.log.warning(f'Received message of type "{mt}", which is not allowed. Ignoring.')
else:
stream = self.channels[channel]
self.session.send(stream, msg)
def _on_zmq_reply(self, stream, msg_list):
idents, fed_msg_list = self.session.feed_identities(msg_list)
msg = self.session.deserialize(fed_msg_list)
parent = msg['parent_header']
def write_stderr(error_message):
self.log.warning(error_message)
msg = self.session.msg("stream",
content={"text": error_message + '\n', "name": "stderr"},
parent=parent
)
msg['channel'] = 'iopub'
self.write_message(json.dumps(msg, default=json_default))
channel = getattr(stream, 'channel', None)
msg_type = msg['header']['msg_type']
if channel == 'iopub' and msg_type == 'status' and msg['content'].get('execution_state') == 'idle':
# reset rate limit counter on status=idle,
# to avoid 'Run All' hitting limits prematurely.
self._iopub_window_byte_queue = []
self._iopub_window_msg_count = 0
self._iopub_window_byte_count = 0
self._iopub_msgs_exceeded = False
self._iopub_data_exceeded = False
if channel == 'iopub' and msg_type not in {'status', 'comm_open', 'execute_input'}:
# Remove the counts queued for removal.
now = IOLoop.current().time()
while len(self._iopub_window_byte_queue) > 0:
queued = self._iopub_window_byte_queue[0]
if (now >= queued[0]):
self._iopub_window_byte_count -= queued[1]
self._iopub_window_msg_count -= 1
del self._iopub_window_byte_queue[0]
else:
                    # This part of the queue hasn't been reached yet, so we can
                    # abort the loop.
break
# Increment the bytes and message count
self._iopub_window_msg_count += 1
if msg_type == 'stream':
byte_count = sum(len(x) for x in msg_list)
else:
byte_count = 0
self._iopub_window_byte_count += byte_count
# Queue a removal of the byte and message count for a time in the
# future, when we are no longer interested in it.
self._iopub_window_byte_queue.append((now + self.rate_limit_window, byte_count))
# Check the limits, set the limit flags, and reset the
# message and data counts.
msg_rate = float(self._iopub_window_msg_count) / self.rate_limit_window
data_rate = float(self._iopub_window_byte_count) / self.rate_limit_window
# Check the msg rate
if self.iopub_msg_rate_limit > 0 and msg_rate > self.iopub_msg_rate_limit:
if not self._iopub_msgs_exceeded:
self._iopub_msgs_exceeded = True
write_stderr(dedent(f"""\
IOPub message rate exceeded.
The notebook server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--NotebookApp.iopub_msg_rate_limit`.
Current values:
NotebookApp.iopub_msg_rate_limit={self.iopub_msg_rate_limit} (msgs/sec)
NotebookApp.rate_limit_window={self.rate_limit_window} (secs)
"""))
else:
# resume once we've got some headroom below the limit
if self._iopub_msgs_exceeded and msg_rate < (0.8 * self.iopub_msg_rate_limit):
self._iopub_msgs_exceeded = False
if not self._iopub_data_exceeded:
self.log.warning("iopub messages resumed")
# Check the data rate
if self.iopub_data_rate_limit > 0 and data_rate > self.iopub_data_rate_limit:
if not self._iopub_data_exceeded:
self._iopub_data_exceeded = True
write_stderr(dedent(f"""\
IOPub data rate exceeded.
The notebook server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--NotebookApp.iopub_data_rate_limit`.
Current values:
NotebookApp.iopub_data_rate_limit={self.iopub_data_rate_limit} (bytes/sec)
NotebookApp.rate_limit_window={self.rate_limit_window} (secs)
"""))
else:
# resume once we've got some headroom below the limit
if self._iopub_data_exceeded and data_rate < (0.8 * self.iopub_data_rate_limit):
self._iopub_data_exceeded = False
if not self._iopub_msgs_exceeded:
self.log.warning("iopub messages resumed")
# If either of the limit flags are set, do not send the message.
if self._iopub_msgs_exceeded or self._iopub_data_exceeded:
# we didn't send it, remove the current message from the calculus
self._iopub_window_msg_count -= 1
self._iopub_window_byte_count -= byte_count
self._iopub_window_byte_queue.pop(-1)
return
super()._on_zmq_reply(stream, msg)
def close(self):
super().close()
return self._close_future
def on_close(self):
self.log.debug("Websocket closed %s", self.session_key)
# unregister myself as an open session (only if it's really me)
if self._open_sessions.get(self.session_key) is self:
self._open_sessions.pop(self.session_key)
km = self.kernel_manager
if self.kernel_id in km:
km.notify_disconnect(self.kernel_id)
km.remove_restart_callback(
self.kernel_id, self.on_kernel_restarted,
)
km.remove_restart_callback(
self.kernel_id, self.on_restart_failed, 'dead',
)
# start buffering instead of closing if this was the last connection
if km._kernel_connections[self.kernel_id] == 0:
km.start_buffering(self.kernel_id, self.session_key, self.channels)
self._close_future.set_result(None)
return
# This method can be called twice, once by self.kernel_died and once
# from the WebSocket close event. If the WebSocket connection is
# closed before the ZMQ streams are setup, they could be None.
for channel, stream in self.channels.items():
if stream is not None and not stream.closed():
stream.on_recv(None)
stream.close()
self.channels = {}
self._close_future.set_result(None)
def _send_status_message(self, status):
iopub = self.channels.get('iopub', None)
if iopub and not iopub.closed():
# flush IOPub before sending a restarting/dead status message
# ensures proper ordering on the IOPub channel
# that all messages from the stopped kernel have been delivered
iopub.flush()
msg = self.session.msg("status",
{'execution_state': status}
)
msg['channel'] = 'iopub'
self.write_message(json.dumps(msg, default=json_default))
    def on_kernel_restarted(self):
        self.log.warning("kernel %s restarted", self.kernel_id)
        self._send_status_message('restarting')
    def on_restart_failed(self):
        self.log.error("kernel %s restart failed!", self.kernel_id)
        self._send_status_message('dead')
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
_kernel_action_regex = r"(?P<action>restart|interrupt)"
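# For example, _kernel_id_regex matches a UUID-style id such as
# "8f0f6cd8-2c40-4be6-8bd6-79e9f5dcb07c" (hypothetical id): each
# hyphen-separated group is one or more word characters.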
default_handlers = [
(r"/api/kernels", MainKernelHandler),
(fr"/api/kernels/{_kernel_id_regex}", KernelHandler),
(fr"/api/kernels/{_kernel_id_regex}/{_kernel_action_regex}", KernelActionHandler),
(fr"/api/kernels/{_kernel_id_regex}/channels", ZMQChannelsHandler),
]
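# The resulting routes (relative to base_url) are:
#   /api/kernels                           -> MainKernelHandler
#   /api/kernels/<kernel_id>               -> KernelHandler
#   /api/kernels/<kernel_id>/<action>      -> KernelActionHandler (restart|interrupt)
#   /api/kernels/<kernel_id>/channels      -> ZMQChannelsHandler (WebSocket)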
| 40.901082
| 144
| 0.613687
|
0c07b2a8a15a73909e8363182415c0f830efddd0
| 18,948
|
py
|
Python
|
validateResource.py
|
aheynig/Redfish-Service-Validator
|
0acc466582adccefcba96c7904c767155b7e2576
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 22
|
2017-08-23T16:47:35.000Z
|
2022-03-21T09:35:42.000Z
|
validateResource.py
|
aheynig/Redfish-Service-Validator
|
0acc466582adccefcba96c7904c767155b7e2576
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 404
|
2017-03-09T05:23:50.000Z
|
2022-03-30T03:13:52.000Z
|
validateResource.py
|
aheynig/Redfish-Service-Validator
|
0acc466582adccefcba96c7904c767155b7e2576
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 27
|
2017-03-06T17:33:56.000Z
|
2022-03-30T21:03:25.000Z
|
# Copyright Notice:
# Copyright 2016-2021 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Service-Validator/blob/master/LICENSE.md
import logging
from collections import Counter, OrderedDict
from io import StringIO
import common.traverse as traverse
import common.catalog as catalog
from validateRedfish import checkPropertyConformance, displayValue
from common.helper import getNamespace, getType, createContext, checkPayloadConformance, navigateJsonFragment, create_entry
my_logger = logging.getLogger()
my_logger.setLevel(logging.DEBUG)
class WarnFilter(logging.Filter):
def filter(self, rec):
return rec.levelno == logging.WARN
fmt = logging.Formatter('%(levelname)s - %(message)s')
def create_logging_capture(this_logger):
errorMessages = StringIO()
warnMessages = StringIO()
errh = logging.StreamHandler(errorMessages)
errh.setLevel(logging.ERROR)
errh.setFormatter(fmt)
warnh = logging.StreamHandler(warnMessages)
warnh.setLevel(logging.WARN)
warnh.addFilter(WarnFilter())
warnh.setFormatter(fmt)
this_logger.addHandler(errh)
this_logger.addHandler(warnh)
return errh, warnh
def get_my_capture(this_logger, handler):
this_logger.removeHandler(handler)
strings = handler.stream.getvalue()
handler.stream.close()
return strings
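# A minimal usage sketch of the capture pair above (hypothetical message):
#   errh, warnh = create_logging_capture(my_logger)
#   my_logger.error('something failed')
#   errors = get_my_capture(my_logger, errh)   # 'ERROR - something failed\n'
#   warns = get_my_capture(my_logger, warnh)   # '' (the filter only passes WARN)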
def validateSingleURI(service, URI, uriName='', expectedType=None, expectedJson=None, parent=None):
# rs-assertion: 9.4.1
# Initial startup here
my_logger.log(logging.INFO-1,"\n*** %s, %s", uriName, URI)
my_logger.info("\n*** %s", URI)
my_logger.log(logging.INFO-1,"\n*** {}, {}".format(expectedType, expectedJson is not None))
counts = Counter()
results, messages = OrderedDict(), OrderedDict()
ehandler, whandler = create_logging_capture(my_logger)
me = {'uri': URI, 'success': False, 'counts': counts, 'messages': messages,
'errors': '', 'warns': '', 'rtime': '', 'rcode': 0,
'fulltype': '', 'context': '...', 'payload': {}}
results[uriName] = me
# check for @odata mandatory stuff
# check for version numbering problems # check id if its the same as URI
# check @odata.context instead of local. Realize that @odata is NOT a "property"
# Attempt to get a list of properties
if URI is None:
URI = '/Missing URI Link'
if parent: URI = str(parent.payload.get('@odata.id')) + URI
my_logger.warning('Tool appears to be missing vital URI information, replacing URI w/: {}'.format(URI))
# Generate dictionary of property info
try:
if expectedJson is None:
ret = service.callResourceURI(URI)
success, me['payload'], response, me['rtime'] = ret
me['rcode'] = response.status
else:
success, me['payload'], me['rcode'], me['rtime'] = True, expectedJson, -1, 0
response = None
if not success:
my_logger.error('URI did not return resource {}'.format(URI))
counts['failGet'] += 1
me['warns'], me['errors'] = get_my_capture(my_logger, whandler), get_my_capture(my_logger, ehandler)
return False, counts, results, None, None
# verify basic odata strings
if results[uriName]['payload'] is not None:
successPayload, odataMessages = checkPayloadConformance(me['payload'], URI)
for m in odataMessages:
msg = create_entry(m, *odataMessages[m])
messages[msg.name] = msg
my_type = me['payload'].get('@odata.type', expectedType)
me['fulltype'] = str(my_type)
if my_type is None:
redfish_obj = None
else:
# TODO: don't have the distinction between Property Type and a Normal Type
if isinstance(my_type, catalog.RedfishType):
my_type = my_type.fulltype
redfish_schema = service.catalog.getSchemaDocByClass(my_type)
redfish_type = redfish_schema.getTypeInSchemaDoc(my_type)
redfish_obj = catalog.RedfishObject(redfish_type, 'Object', parent=parent).populate(me['payload']) if redfish_type else None
if redfish_obj:
me['fulltype'] = redfish_obj.Type.fulltype
else:
counts['problemResource'] += 1
me['warns'], me['errors'] = get_my_capture(my_logger, whandler), get_my_capture(my_logger, ehandler)
return False, counts, results, None, None
except traverse.AuthenticationError as e:
raise # re-raise exception
except Exception as e:
my_logger.log(logging.INFO-1, 'Exception caught while creating ResourceObj', exc_info=1)
my_logger.error('Unable to gather property info for URI {}: {}'.format(URI, repr(e)))
counts['exceptionResource'] += 1
me['warns'], me['errors'] = get_my_capture(my_logger, whandler), get_my_capture(my_logger, ehandler)
return False, counts, results, None, None
counts['passGet'] += 1
# verify odata_id properly resolves to its parent if holding fragment
odata_id = me['payload'].get('@odata.id')
if odata_id is not None and '#' in odata_id:
if parent is not None:
payload_resolve = navigateJsonFragment(parent.payload, URI)
if parent.payload.get('@odata.id') not in URI:
                my_logger.info('@odata.id of ReferenceableMember was referenced elsewhere: {}'.format(odata_id))
elif payload_resolve is None:
my_logger.error('@odata.id of ReferenceableMember does not contain a valid JSON pointer for this payload: {}'.format(odata_id))
counts['badOdataIdResolution'] += 1
elif payload_resolve != me['payload']:
my_logger.error('@odata.id of ReferenceableMember does not point to the correct object: {}'.format(odata_id))
counts['badOdataIdResolution'] += 1
else:
            my_logger.warning('No parent found with which to test @odata.id of ReferenceableMember')
if service.config['uricheck']:
my_uris = redfish_obj.Type.getUris()
if odata_id is not None and redfish_obj.Populated and len(my_uris) > 0:
if redfish_obj.HasValidUri:
counts['passRedfishUri'] += 1
else:
if '/Oem/' in odata_id:
counts['warnRedfishUri'] += 1
messages['@odata.id'].result = 'WARN'
my_logger.warning('URI {} does not match the following required URIs in Schema of {}'.format(odata_id, redfish_obj.Type))
else:
counts['failRedfishUri'] += 1
messages['@odata.id'].result = 'FAIL'
my_logger.error('URI {} does not match the following required URIs in Schema of {}'.format(odata_id, redfish_obj.Type))
if response and response.getheader('Allow'):
allowed_responses = [x.strip().upper() for x in response.getheader('Allow').split(',')]
if not redfish_obj.Type.CanInsert and 'POST' in allowed_responses:
my_logger.error('Allow header should NOT contain POST for {}'.format(redfish_obj.Type))
counts['failAllowHeader'] += 1
if not redfish_obj.Type.CanDelete and 'DELETE' in allowed_responses:
my_logger.error('Allow header should NOT contain DELETE for {}'.format(redfish_obj.Type))
counts['failAllowHeader'] += 1
if not redfish_obj.Type.CanUpdate and any([x in allowed_responses for x in ['PATCH', 'PUT']]):
my_logger.warning('Allow header should NOT contain PATCH or PUT for {}'.format(redfish_obj.Type))
counts['warnAllowHeader'] += 1
if not successPayload:
counts['failPayloadError'] += 1
my_logger.error(str(URI) + ': payload error, @odata property non-conformant',)
# if URI was sampled, get the notation text from traverseService.uri_sample_map
results[uriName]['uri'] = (str(URI))
results[uriName]['context'] = createContext(me['fulltype'])
results[uriName]['origin'] = redfish_obj.Type.owner.parent_doc.name
results[uriName]['success'] = True
my_logger.info("\t Type (%s), GET SUCCESS (time: %s)", me['fulltype'], me['rtime'])
for prop_name, prop in redfish_obj.properties.items():
try:
if not prop.HasSchema and not prop.Exists:
counts['skipNoSchema'] += 1
continue
elif not prop.HasSchema:
my_logger.error('No Schema for property {}'.format(prop.Name))
counts['errorNoSchema'] += 1
continue
propMessages, propCounts = checkPropertyConformance(service, prop_name, prop)
propMessages = {x:create_entry(x, *y) if isinstance(y, tuple) else y for x,y in propMessages.items()}
            if 'MessageRegistry.MessageRegistry' not in redfish_obj.Type.getTypeTree():
if '@Redfish.Copyright' in propMessages:
modified_entry = propMessages['@Redfish.Copyright']
modified_entry.result = 'FAIL'
my_logger.error('@Redfish.Copyright is only allowed for mockups, and should not be allowed in official implementations')
messages.update(propMessages)
counts.update(propCounts)
except traverse.AuthenticationError as e:
raise # re-raise exception
except Exception as ex:
my_logger.log(logging.INFO-1, 'Exception caught while validating single URI', exc_info=1)
my_logger.error('{}: Could not finish check on this property ({})'.format(prop_name, str(ex)))
propMessages[prop_name] = create_entry(prop_name, '', '', prop.Exists, 'exception')
counts['exceptionPropCheck'] += 1
SchemaFullType, jsonData = me['fulltype'], me['payload']
SchemaNamespace, SchemaType = getNamespace(SchemaFullType), getType(SchemaFullType)
# List all items checked and unchecked
# current logic does not check inside complex types
fmt = '%-30s%30s'
my_logger.log(logging.INFO-1,'%s, %s, %s', uriName, SchemaNamespace, SchemaType)
for key in jsonData:
my_logger.log(logging.INFO-1,fmt % (key, messages[key].result if key in messages else 'Exists, no schema check'))
allowAdditional = redfish_obj.Type.HasAdditional
for key in [k for k in jsonData if k not in messages and k not in redfish_obj.properties and '@' not in k]:
# note: extra messages for "unchecked" properties
item = jsonData.get(key)
if not allowAdditional:
my_logger.error('{} not defined in schema {} (check version, spelling and casing)'.format(key, SchemaNamespace))
counts['failAdditional'] += 1
messages[key] = create_entry(key, displayValue(item), '-', '-', 'FAIL')
else:
            my_logger.warning('{} not defined in schema {} (check version, spelling and casing)'.format(key, SchemaNamespace))
counts['unverifiedAdditional'] += 1
messages[key] = create_entry(key, displayValue(item), '-', '-', 'Additional')
fuzz = catalog.get_fuzzy_property(key, redfish_obj.properties)
if fuzz != key and fuzz in redfish_obj.properties:
messages[fuzz] = create_entry(fuzz, '-', '-', '-', 'INVALID')
my_logger.error('Attempting {} (from {})?'.format(fuzz, key))
my_new_obj = redfish_obj.properties[fuzz].populate(item)
new_msgs, new_counts = checkPropertyConformance(service, key, my_new_obj)
new_msgs = {x:create_entry(x, *y) for x,y in new_msgs.items()}
messages.update(new_msgs)
counts.update(new_counts)
counts['invalidNamedProperty'] += 1
for key in messages:
if key not in jsonData:
my_logger.log(logging.INFO-1,fmt % (key, messages[key].result))
results[uriName]['warns'], results[uriName]['errors'] = get_my_capture(my_logger, whandler), get_my_capture(my_logger, ehandler)
pass_val = len(results[uriName]['errors']) == 0
for key in counts:
if any(x in key for x in ['problem', 'fail', 'bad', 'exception']):
pass_val = False
break
my_logger.info("\t {}".format('PASS' if pass_val else' FAIL...'))
my_logger.log(logging.INFO-1,'%s, %s', SchemaFullType, counts)
# Get all links available
my_logger.debug(redfish_obj.getLinks())
return True, counts, results, redfish_obj.getLinks(), redfish_obj
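# Note: validateSingleURI returns a 5-tuple of (success, counts, results, links,
# redfish_obj); validateURITree below unpacks the same shape when it recurses
# over the links collected here.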
def validateURITree(service, URI, uriName, expectedType=None, expectedJson=None, parent=None, allLinks=None, inAnnotation=False):
# from given URI, validate it, then follow its links like nodes
# Other than expecting a valid URI, on success (real URI) expects valid links
# valid links come from getAllLinks, includes info such as expected values, etc
# as long as it is able to pass that info, should not crash
# If this is our first called URI
top = allLinks is None
if top: allLinks = set()
allLinks.add(URI)
refLinks = []
if inAnnotation and service.config['uricheck']:
service.catalog.flags['ignore_uri_checks'] = True
validateSuccess, counts, results, links, thisobj = validateSingleURI(service, URI, uriName, expectedType, expectedJson, parent)
if inAnnotation and service.config['uricheck']:
service.catalog.flags['ignore_uri_checks'] = False
# If successful and a MessageRegistryFile...
if validateSuccess and 'MessageRegistryFile.MessageRegistryFile' in thisobj.Type.getTypeTree():
# thisobj['Location'].Collection[0]['Uri'].Exists
if 'Location' in thisobj:
for sub_obj in thisobj['Location'].Collection:
if 'Uri' in sub_obj:
links.append(sub_obj)
# If successful...
if validateSuccess:
# Bring Registries to Front if possible
log_entries = [x for x in links if 'LogEntry' in x.Type.fulltype]
links = [x for x in links if 'LogEntry' not in x.Type.fulltype] + log_entries[:15] # Pare down logentries
for link in sorted(links, key=lambda x: (x.Type.fulltype != 'Registries.Registries')):
if link is None or link.Value is None:
my_logger.warning('Link is None, does it exist?')
continue
# get Uri or @odata.id
link_destination = link.Value.get('@odata.id', link.Value.get('Uri'))
if link.Type.Excerpt:
continue
if any(x in str(link.parent.Type) or x in link.Name for x in ['RelatedItem', 'Redundancy', 'Links', 'OriginOfCondition']):
refLinks.append((link, thisobj))
continue
if link_destination in allLinks:
counts['repeat'] += 1
continue
elif link_destination is None:
errmsg = 'URI for NavigationProperty is missing {}'.format(uriName)
my_logger.error(errmsg)
results[uriName]['errors'] += '\n' + errmsg
counts['errorMissingOdata'] += 1
continue
elif link_destination.split('#')[0].endswith('/'):
# (elegantly) add warn message to resource html
warnmsg = 'URI acquired ends in slash: {}'.format(link_destination)
my_logger.warning(warnmsg)
results[uriName]['warns'] += '\n' + warnmsg
counts['warnTrailingSlashLink'] += 1
newLink = ''.join(link_destination.split('/')[:-1])
if newLink in allLinks:
counts['repeat'] += 1
continue
if link.Type is not None and link.Type.AutoExpand:
returnVal = validateURITree(service, link_destination, uriName + ' -> ' + link.Name, link.Type, link.Value, parent, allLinks, link.InAnnotation)
else:
returnVal = validateURITree(service, link_destination, uriName + ' -> ' + link.Name, parent=parent, allLinks=allLinks, inAnnotation=link.InAnnotation)
success, linkCounts, linkResults, xlinks, xobj = returnVal
my_logger.log(logging.INFO-1,'%s, %s', link.Name, linkCounts)
refLinks.extend(xlinks)
if not success:
counts['unvalidated'] += 1
results.update(linkResults)
if top:
# TODO: consolidate above code block with this
for link in refLinks:
link, refparent = link
# get Uri or @odata.id
if link is None or link.Value is None:
my_logger.warning('Link is None, does it exist?')
continue
link_destination = link.Value.get('@odata.id', link.Value.get('Uri'))
if link.Type.Excerpt:
continue
elif link_destination is None:
                errmsg = 'Referenced URI for NavigationProperty is missing: {} {} {}'.format(link_destination, link.Name, link.parent)
my_logger.error(errmsg)
results[uriName]['errors'] += '\n' + errmsg
counts['errorMissingRefOdata'] += 1
continue
elif link_destination.split('#')[0].endswith('/'):
# (elegantly) add warn message to resource html
warnmsg = 'Referenced URI acquired ends in slash: {}'.format(link_destination)
my_logger.warning(warnmsg)
results[uriName]['warns'] += '\n' + warnmsg
counts['warnTrailingSlashRefLink'] += 1
newLink = ''.join(link_destination.split('/')[:-1])
if newLink in allLinks:
counts['repeat'] += 1
continue
if link_destination not in allLinks:
my_logger.log(logging.INFO-1,'{}, {}'.format(link.Name, link))
counts['reflink'] += 1
else:
continue
my_link_type = link.Type.fulltype
success, my_data, _, _ = service.callResourceURI(link_destination)
# Using None instead of refparent simply because the parent is not where the link comes from
returnVal = validateURITree(service, link_destination, uriName + ' -> ' + link.Name,
my_link_type, my_data, None, allLinks)
success, linkCounts, linkResults, xlinks, xobj = returnVal
# refLinks.update(xlinks)
if not success:
counts['unvalidatedRef'] += 1
if 'OriginOfCondition' in link.Name or 'OriginOfCondition' in link.parent.Name:
                    my_logger.info('Link was unsuccessful, but non-mandatory')
else:
results.update(linkResults)
else:
results.update(linkResults)
return validateSuccess, counts, results, refLinks, thisobj
| 47.72796
| 166
| 0.623654
|
c46d19f1d055a60083200192596ef6fecf37ca64
| 8,846
|
py
|
Python
|
.history/pages/intro_20220303154629.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303154629.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303154629.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
"""
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
st.set_page_config(
page_title='Code Compendium Intro Page',
layout="wide",
# initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
img_bytes = Path(img_path).read_bytes()
encoded = base64.b64encode(img_bytes).decode()
return encoded
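# Example (hypothetical path): img_to_bytes('logomark_website.png') returns a
# base64 string that can be embedded inline, e.g.:
#   st.markdown(f"<img src='data:image/png;base64,{img_to_bytes('logo.png')}'>",
#               unsafe_allow_html=True)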
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
col1, col2 = st.columns(2)
col1.header('Ryan Paik')
col1.markdown(
'''
*“You don't learn to walk by following rules. You learn by doing, and by falling over.”*
-Richard Branson
-----
''')
col1.subheader("Welcome to my Code Compendium.")
    col1.markdown('''
This website/webapp is my personal cheat sheet of all the code snippets I have needed over the past 2 years. It ended up being a quick detour into Streamlit, which I fell in love with while building Flask APIs.
-----
**Programming is only as deep as you want to dive in.**
This webapp features the basic code snippets from all the "googling" I have done while programming.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-----
**Bio:**
Currently a sophomore at the University of Illinois at Urbana-Champaign,
working nights on my degree in the System Engineering Program.
**Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
**Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can come up for air:
- React, Swift (iOS), Rust, Go!!
- Find a team to get a paper on arXiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# col2.code('''
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
# # Control flow
# col2.subheader('Control flow')
# col2.code('''
# st.stop()
# ''')
# # Lay out your app
# col2.subheader('Lay out your app')
# col2.code('''
# st.form('my_form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# col2.write('Batch widgets together in a form:')
# col2.code('''
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
# # Display code
# col2.subheader('Display code')
# col2.code('''
# st.echo()
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
# # Display progress and status
# col2.subheader('Display progress and status')
# col2.code('''
# st.progress(progress_variable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
# # Placeholders, help, and options
# col2.subheader('Placeholders, help, and options')
# col2.code('''
# st.empty()
# >>> my_placeholder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
# # Mutate data
# col2.subheader('Mutate data')
# col2.code('''
# DeltaGenerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
# # Optimize performance
# col2.subheader('Optimize performance')
# col2.code('''
# @st.cache
# >>> @st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# col2.subheader('Other key parts of the API')
# col2.markdown('''
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
# Column 3 TOC Generator
# col3.subheader('test')
# toc = Toc(col3)
# # col2.title("Table of contents")
# col3.write("http://localhost:8502/#display-progress-and-status", unsafe_allow_html=True)
# toc.header("Header 1")
# toc.header("Header 2")
# toc.generate()
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# return None
# Run main()
# if __name__ == '__main__':
# main()
# def main():
def app():
# cs_sidebar()
cs_body()
return None
| 27.302469
| 228
| 0.658829
|
eb9012c4195554d6dc1652aaffe5724a2fc7a84a
| 1,764
|
py
|
Python
|
helpers/db_models.py
|
Holovin/D_UxRepostBot_Legacy
|
858bbfc4193758a268cc8c6f2f4ee8637f20351d
|
[
"MIT"
] | 2
|
2017-06-27T06:08:40.000Z
|
2017-07-04T22:13:29.000Z
|
helpers/db_models.py
|
Holovin/D_UxRepostBot_Legacy
|
858bbfc4193758a268cc8c6f2f4ee8637f20351d
|
[
"MIT"
] | null | null | null |
helpers/db_models.py
|
Holovin/D_UxRepostBot_Legacy
|
858bbfc4193758a268cc8c6f2f4ee8637f20351d
|
[
"MIT"
] | null | null | null |
from peewee import *
from pytz import timezone
from datetime import datetime
from config import Config
from helpers.db_connect import Database
class BaseModel(Model):
class Meta:
database = Database.get_db()
class Settings(BaseModel):
id = PrimaryKeyField(primary_key=True)
# channel for check stat
channel_name = CharField(null=False)
# stat will send here
print_to = CharField(null=False)
# admin id for logs
admin_to = CharField(null=False)
# min number of subs (>0) for trigger send stat
trigger_min_sub = IntegerField(null=False)
# min number of unsubs (<0) for trigger send stat
trigger_min_unsub = IntegerField(null=False)
# min number of flow-users for trigger send stat
trigger_min_flow = IntegerField(null=False)
# trigger when users % every_odd == 0 (aka 'get 9000')
trigger_every_odd = IntegerField(null=False, default=100)
# trigger when new day occur
trigger_new_day = BooleanField(null=False, default=True)
# total users
stat_total_users = IntegerField(null=False, default=0)
# users from previous write
stat_period_users = IntegerField(null=False, default=0)
# day users
stat_day_users = IntegerField(null=False, default=0)
# delta users
stat_delta_users = IntegerField(null=False, default=0)
# max users
stat_max_users = IntegerField(null=False, default=0)
    # last check time (callable default so it is evaluated per row, not once at import)
    stat_last_check_time = DateTimeField(null=False, default=lambda: datetime.now(timezone(Config.TIMEZONE)))
# no more 1 message more than ban_minutes
write_ban_minutes = IntegerField(null=False, default=1)
    # last write time (callable default so it is evaluated per row, not once at import)
    write_last_time = DateTimeField(null=False, default=lambda: datetime.now(timezone(Config.TIMEZONE)))
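# A minimal usage sketch (hypothetical values; assumes Database.get_db() is
# connected and the table has been created):
#   Settings.create(channel_name='@my_channel', print_to='@stats_chat',
#                   admin_to='12345678', trigger_min_sub=5,
#                   trigger_min_unsub=-5, trigger_min_flow=10)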
| 27.5625
| 101
| 0.721088
|
d2ab44aba46a7d4c2a0d875c996252834dc96d1d
| 11,676
|
py
|
Python
|
src/rogerthat/restapi/messaging.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/restapi/messaging.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/restapi/messaging.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from types import NoneType
from mcfw.properties import object_factory, azzert
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.api.messaging import ackMessage as ackMessageApi, lockMessage as lockMessageApi, \
sendMessage as sendMessageApi, submitTextLineForm as submitTextLineFormApi, \
submitTextBlockForm as submitTextBlockFormApi, submitAutoCompleteForm as submitAutoCompleteFormApi, \
submitSingleSelectForm as submitSingleSelectFormApi, submitMultiSelectForm as submitMultiSelectFormApi, \
submitSingleSliderForm as submitSingleSliderFormApi, submitRangeSliderForm as submitRangeSliderFormApi, \
submitDateSelectForm as submitDateSelectFormApi, submitPhotoUploadForm as submitPhotoUploadFormApi
from rogerthat.dal.messaging import get_messages, get_message_key, get_root_message, get_message_history, \
get_message_thread, get_service_inbox, get_message
from rogerthat.to import MESSAGE_TYPE_MAPPING, ROOT_MESSAGE_TYPE_TO_MAPPING, MESSAGE_TYPE_TO_MAPPING
from rogerthat.to.messaging import SendMessageResponseTO, SendMessageRequestTO, AckMessageResponseTO, \
AckMessageRequestTO, MessageTO, RootMessageTO, LockMessageResponseTO, LockMessageRequestTO, MessageReceivedResponseTO, \
MessageReceivedRequestTO, MessageListTO, RootMessageListTO
from rogerthat.to.messaging.forms import SubmitTextLineFormResponseTO, SubmitTextLineFormRequestTO, \
SubmitTextBlockFormResponseTO, SubmitTextBlockFormRequestTO, SubmitAutoCompleteFormResponseTO, \
SubmitAutoCompleteFormRequestTO, SubmitSingleSelectFormResponseTO, SubmitSingleSelectFormRequestTO, \
SubmitMultiSelectFormResponseTO, SubmitMultiSelectFormRequestTO, SubmitSingleSliderFormResponseTO, \
SubmitSingleSliderFormRequestTO, SubmitRangeSliderFormResponseTO, SubmitRangeSliderFormRequestTO, \
SubmitDateSelectFormResponseTO, SubmitDateSelectFormRequestTO, SubmitPhotoUploadFormResponseTO, \
SubmitPhotoUploadFormRequestTO
MESSAGES_BATCH_SIZE = 25
@rest("/mobi/rest/messaging/send", "post")
@returns(SendMessageResponseTO)
@arguments(request=SendMessageRequestTO)
def sendMessage(request):
return sendMessageApi(request)
@rest("/mobi/rest/messaging/ack", "post")
@returns(AckMessageResponseTO)
@arguments(request=AckMessageRequestTO)
def ackMessage(request):
return ackMessageApi(request)
@rest("/mobi/rest/messaging/delete_conversation", "post")
@returns(NoneType)
@arguments(parent_message_key=unicode)
def deleteConversation(parent_message_key):
from rogerthat.bizz.messaging import delete_conversation
from rogerthat.rpc import users
delete_conversation(users.get_current_user(), parent_message_key)
@rest("/mobi/rest/messaging/submitTextLineForm", "post")
@returns(SubmitTextLineFormResponseTO)
@arguments(request=SubmitTextLineFormRequestTO)
def submitTextLineForm(request):
return submitTextLineFormApi(request)
@rest("/mobi/rest/messaging/submitTextBlockForm", "post")
@returns(SubmitTextBlockFormResponseTO)
@arguments(request=SubmitTextBlockFormRequestTO)
def submitTextBlockForm(request):
return submitTextBlockFormApi(request)
@rest("/mobi/rest/messaging/submitAutoCompleteForm", "post")
@returns(SubmitAutoCompleteFormResponseTO)
@arguments(request=SubmitAutoCompleteFormRequestTO)
def submitAutoCompleteForm(request):
return submitAutoCompleteFormApi(request)
@rest("/mobi/rest/messaging/submitSingleSelectForm", "post")
@returns(SubmitSingleSelectFormResponseTO)
@arguments(request=SubmitSingleSelectFormRequestTO)
def submitSingleSelectForm(request):
return submitSingleSelectFormApi(request)
@rest("/mobi/rest/messaging/submitMultiSelectForm", "post")
@returns(SubmitMultiSelectFormResponseTO)
@arguments(request=SubmitMultiSelectFormRequestTO)
def submitMultiSelectForm(request):
return submitMultiSelectFormApi(request)
@rest("/mobi/rest/messaging/submitDateSelectForm", "post")
@returns(SubmitDateSelectFormResponseTO)
@arguments(request=SubmitDateSelectFormRequestTO)
def submitDateSelectForm(request):
return submitDateSelectFormApi(request)
@rest("/mobi/rest/messaging/submitSingleSliderForm", "post")
@returns(SubmitSingleSliderFormResponseTO)
@arguments(request=SubmitSingleSliderFormRequestTO)
def submitSingleSliderForm(request):
return submitSingleSliderFormApi(request)
@rest("/mobi/rest/messaging/submitRangeSliderForm", "post")
@returns(SubmitRangeSliderFormResponseTO)
@arguments(request=SubmitRangeSliderFormRequestTO)
def submitRangeSliderForm(request):
return submitRangeSliderFormApi(request)
@rest("/mobi/rest/messaging/submitPhotoUploadForm", "post")
@returns(SubmitPhotoUploadFormResponseTO)
@arguments(request=SubmitPhotoUploadFormRequestTO)
def submitPhotoUploadForm(request):
return submitPhotoUploadFormApi(request)
@rest("/mobi/rest/messaging/lock", "post")
@returns(LockMessageResponseTO)
@arguments(request=LockMessageRequestTO)
def lockMessage(request):
return lockMessageApi(request)
@rest("/mobi/rest/messaging/received", "post")
@returns(MessageReceivedResponseTO)
@arguments(request=MessageReceivedRequestTO)
def messageReceived(request):
from rogerthat.rpc import users
user = users.get_current_user()
from rogerthat.bizz.messaging import message_received
message_received(user, get_message_key(request.message_key, request.message_parent_key), request.received_timestamp)
@rest("/mobi/rest/messaging/get", "get")
@returns(RootMessageListTO)
@arguments(cursor=unicode)
def getMessages(cursor):
from rogerthat.rpc import users
user = users.get_current_user()
result = RootMessageListTO()
result.messages, result.cursor = _get_messages(cursor, user)
result.batch_size = MESSAGES_BATCH_SIZE
return result
@rest("/mobi/rest/messaging/get_single", "post")
@returns(object_factory("message_type", MESSAGE_TYPE_TO_MAPPING))
@arguments(message_key=unicode, parent_message_key=unicode)
def getSingleMessage(message_key, parent_message_key):
from rogerthat.rpc import users
user = users.get_current_user()
message = get_message(message_key, parent_message_key)
return _convert_to_tos(user, [message])[0]
@rest("/mobi/rest/messaging/get_root_message", "get")
@returns(object_factory("message_type", ROOT_MESSAGE_TYPE_TO_MAPPING))
@arguments(message_key=unicode)
def getRootMessage(message_key):
from rogerthat.rpc import users
user = users.get_current_user()
messages = get_root_message(user, message_key)
member = user if not messages[0].sharedMembers and messages[0].sender != user else None
message = RootMessageTO.fromMessage(messages[0], member)
message.messages = [MessageTO.fromMessage(m, member) for m in messages[1:]]
return message
@rest("/mobi/rest/messaging/get_service_inbox", "post")
@returns(MessageListTO)
@arguments(cursor=unicode)
def getServiceInbox(cursor):
from rogerthat.rpc import users
user = users.get_current_user()
messages, cursor = get_service_inbox(user, cursor)
result = MessageListTO()
result.cursor = unicode(cursor)
result.messages = _convert_to_tos(user, messages)
return result
@rest("/mobi/rest/messaging/history", "post")
@returns(MessageListTO)
@arguments(query_param=unicode, cursor=unicode)
def getMessageHistory(query_param, cursor):
from rogerthat.rpc import users
user = users.get_current_user()
member = users.User(query_param)
messages, new_cursor, thread_sizes = get_message_history(user, member, cursor, MESSAGES_BATCH_SIZE)
history = MessageListTO()
messages = _convert_to_tos(user, messages, thread_sizes)
history.messages = messages
history.cursor = unicode(new_cursor)
history.batch_size = MESSAGES_BATCH_SIZE
return history
@rest("/mobi/rest/messaging/thread", "get")
@returns([MessageTO])
@arguments(thread_key=unicode)
def getMessageThread(thread_key):
from rogerthat.rpc import users
user = users.get_current_user()
messages = get_message_thread(thread_key)
azzert(user in messages[0].members) # security check
result = list()
for message in messages:
member = user if not message.sharedMembers and message.sender != user else None
message_type_descr = MESSAGE_TYPE_MAPPING[message.TYPE]
args = [message]
if message_type_descr.include_member_in_conversion:
args.append(member)
result.append(message_type_descr.model_to_conversion(*args))
return result
@rest("/mobi/rest/messaging/mark_messages_as_read", "post")
@returns(NoneType)
@arguments(parent_message_key=unicode, message_keys=[unicode])
def markMessagesAsRead(parent_message_key, message_keys):
from rogerthat.rpc import users
from rogerthat.bizz.messaging import markMessagesAsRead as markMessagesAsReadBizz
user = users.get_current_user()
markMessagesAsReadBizz(user, parent_message_key, message_keys)
@rest("/mobi/rest/messaging/dismiss_conversation", "post")
@returns(NoneType)
@arguments(parent_message_key=unicode, message_keys=[unicode], timestamp=int)
def dismissConversation(parent_message_key, message_keys, timestamp):
from rogerthat.rpc import users
from rogerthat.bizz.messaging import ackMessage as ackMessageBizz
for key in message_keys:
ackMessageBizz(users.get_current_user(), key, None if key == parent_message_key else parent_message_key,
button_id=None, custom_reply=None, timestamp=timestamp)
def _convert_to_tos(user, messageList, thread_sizes=None):
messages = list()
for message in messageList:
member = user if not message.sharedMembers and message.sender != user else None
message_type_descr = MESSAGE_TYPE_MAPPING[message.TYPE]
args = [message]
if message_type_descr.include_member_in_conversion:
args.append(member)
to_obj = message_type_descr.model_to_conversion(*args)
if thread_sizes:
to_obj.thread_size = thread_sizes.get(message.key().name(), 0)
        messages.append(to_obj)
return messages
def _arrangeMessageInTree(user, messageList):
messages = dict()
for message in messageList:
member = user if not message.sharedMembers and message.sender != user else None
message_type_descr = MESSAGE_TYPE_MAPPING[message.TYPE]
args = [message]
if message_type_descr.include_member_in_conversion:
args.append(member)
if message.isRootMessage:
messages[message.mkey] = message_type_descr.root_model_to_conversion(*args)
else:
messages[message.pkey].messages.append(message_type_descr.model_to_conversion(*args))
for message in messages.values():
        message.messages = sorted(message.messages, key=lambda m: m.timestamp)
    messages = sorted(messages.values(), key=lambda m: m.threadTimestamp, reverse=True)
return messages
def _get_messages(cursor, user):
messageList, cursor = get_messages(user, cursor, MESSAGES_BATCH_SIZE, user_only=True)
messages = _arrangeMessageInTree(user, messageList)
return messages, unicode(cursor)
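# --- Usage sketch (illustrative; not part of the original module) ---
# The @rest-decorated handlers above are reached over HTTP by the web client,
# with request bodies matching their @arguments declarations. For example
# (paths taken from the decorators, payload values hypothetical):
#   POST /mobi/rest/messaging/get_service_inbox      {"cursor": null}
#   POST /mobi/rest/messaging/mark_messages_as_read  {"parent_message_key": "...", "message_keys": ["..."]}
# Each handler resolves the current user from the session and returns the
# transfer object declared in its @returns decorator.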
hexsha: 99d425438100d4b6fab641372347d1f463eee942 | size: 7342 | lang: Python | path: kubeflow/fairing/deployers/job/job.py | repo: songm28/fairing | license: Apache-2.0 | forks: 2
import logging
import json
import uuid
from kubernetes import client as k8s_client
from kubeflow.fairing import utils
from kubeflow.fairing.constants import constants
from kubeflow.fairing.kubernetes.manager import KubeManager
from kubeflow.fairing.deployers.deployer import DeployerInterface
logger = logging.getLogger(__name__)
class Job(DeployerInterface): #pylint:disable=too-many-instance-attributes
"""Handle all the k8s' template building for a training"""
def __init__(self, namespace=None, runs=1, output=None,
cleanup=True, labels=None, job_name=None,
stream_log=True, deployer_type=constants.JOB_DEPLOPYER_TYPE,
pod_spec_mutators=None, annotations=None, config_file=None,
context=None, client_configuration=None, persist_config=True, verify_ssl=True):
"""
:param namespace: k8s namespace where the training's components will be deployed.
:param runs: Number of training(s) to be deployed. Hyperparameter search
will generate multiple jobs.
        :param output: if set, print the generated job spec as JSON before deploying
        :param cleanup: if True, delete the job's pods once the job has finished
        :param labels: labels to be assigned to the training job
        :param job_name: name of the job
        :param stream_log: if True, stream the job's logs after deployment
        :param deployer_type: deployer type name, also set as a pod label
:param pod_spec_mutators: pod spec mutators (Default value = None)
:param config_file: kubeconfig file, defaults to ~/.kube/config. Note that for the case
that the SDK is running in cluster and you want to operate in another remote
cluster, user must set config_file to load kube-config file explicitly.
:param context: kubernetes context
:param client_configuration: The kubernetes.client.Configuration to set configs to.
:param persist_config: If True, config file will be updated when changed
:param verify_ssl: use ssl verify or not, set in the client config
"""
if namespace is None:
self.namespace = utils.get_default_target_namespace()
else:
self.namespace = namespace
# Used as pod and job name
self.job_name = job_name
self.deployer_type = deployer_type
self.deployment_spec = None
self.runs = runs
self.output = output
self.backend = KubeManager(
config_file=config_file,
context=context,
client_configuration=client_configuration,
persist_config=persist_config,
verify_ssl=verify_ssl)
self.cleanup = cleanup
self.stream_log = stream_log
self.set_labels(labels, deployer_type)
        self.set_annotations(annotations)
        self.pod_spec_mutators = pod_spec_mutators or []
        self.verify_ssl = verify_ssl
    def set_annotations(self, annotations):
self.annotations = {}
if annotations:
self.annotations.update(annotations)
def set_labels(self, labels, deployer_type):
"""set labels for the pods of a deployed job
:param labels: dictionary of labels {label_name:label_value}
:param deployer_type: deployer type name
"""
self.labels = {'fairing-deployer': deployer_type}
if labels:
self.labels.update(labels)
def deploy(self, pod_spec): #pylint:disable=arguments-differ
"""deploy the training job using k8s client lib
:param pod_spec: pod spec of deployed training job
"""
self.job_id = str(uuid.uuid1())
self.labels['fairing-id'] = self.job_id
for fn in self.pod_spec_mutators:
fn(self.backend, pod_spec, self.namespace)
pod_template_spec = self.generate_pod_template_spec(pod_spec)
pod_template_spec.spec.restart_policy = 'Never'
pod_template_spec.spec.containers[0].name = 'fairing-job'
self.deployment_spec = self.generate_deployment_spec(pod_template_spec)
if self.output:
api = k8s_client.ApiClient()
job_output = api.sanitize_for_serialization(self.deployment_spec)
print(json.dumps(job_output))
name = self.create_resource()
logger.warning("The {} {} launched.".format(self.deployer_type, name))
if self.stream_log:
self.get_logs()
return name
def create_resource(self):
""" create job"""
self._created_job = self.backend.create_job(self.namespace, self.deployment_spec)
return self._created_job.metadata.name
def generate_pod_template_spec(self, pod_spec):
"""Generate a V1PodTemplateSpec initiazlied with correct metadata
and with the provided pod_spec
:param pod_spec: pod spec
"""
if not isinstance(pod_spec, k8s_client.V1PodSpec):
raise TypeError('pod_spec must be a V1PodSpec, but got %s'
% type(pod_spec))
if not self.annotations:
self.annotations = {'sidecar.istio.io/inject': 'false'}
else:
self.annotations['sidecar.istio.io/inject'] = 'false'
return k8s_client.V1PodTemplateSpec(
metadata=k8s_client.V1ObjectMeta(name="fairing-deployer",
annotations=self.annotations,
labels=self.labels),
spec=pod_spec)
def generate_deployment_spec(self, pod_template_spec):
"""Generate a V1Job initialized with correct completion and
parallelism (for HP search) and with the provided V1PodTemplateSpec
:param pod_template_spec: V1PodTemplateSpec
"""
if not isinstance(pod_template_spec, k8s_client.V1PodTemplateSpec):
raise TypeError("""pod_template_spec must be a V1PodTemplateSpec,
but got %s""" % type(pod_template_spec))
job_spec = k8s_client.V1JobSpec(
template=pod_template_spec,
parallelism=self.runs,
completions=self.runs,
backoff_limit=0,
)
return k8s_client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=k8s_client.V1ObjectMeta(
name=self.job_name,
generate_name=constants.JOB_DEFAULT_NAME,
labels=self.labels),
spec=job_spec
)
def get_logs(self):
""" get logs from the deployed job"""
self.backend.log(self._created_job.metadata.name,
self._created_job.metadata.namespace,
self.labels,
container="fairing-job")
if self.cleanup:
self.do_cleanup()
def do_cleanup(self):
""" clean up the pods after job finished"""
logger.warning("Cleaning up job {}...".format(self._created_job.metadata.name))
        client_config = k8s_client.Configuration()
        client_config.verify_ssl = self.verify_ssl
        api_client = k8s_client.ApiClient(configuration=client_config)
k8s_client.BatchV1Api(api_client=api_client).delete_namespaced_job(
self._created_job.metadata.name,
self._created_job.metadata.namespace,
body=k8s_client.V1DeleteOptions(propagation_policy='Foreground'))
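# --- Usage sketch (a minimal sketch, not part of the original module) ---
# Assuming a reachable cluster and a prebuilt training image (both are
# placeholders here), the Job deployer above could be driven like this:
#
#   from kubernetes import client as k8s_client
#   from kubeflow.fairing.deployers.job.job import Job
#
#   pod_spec = k8s_client.V1PodSpec(containers=[
#       k8s_client.V1Container(name="fairing-job",
#                              image="registry.example.com/train:latest")])
#   deployer = Job(namespace="default", runs=1, stream_log=False)
#   job_name = deployer.deploy(pod_spec)  # builds the V1Job and submits it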
hexsha: 620c5ed8c4bee26f18fd819137fcdfa0b1ac59cd | size: 1208 | lang: Python | path: app.py | repo: michal-siedlecki/messages-api | license: MIT
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import API_URL
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from messages import views
# A route to return messages_app info.
@app.route('/', methods=['GET'])
def info():
return views.info_view()
# A route to return all of the available entries in the system.
@app.route(f'/{API_URL}', methods=['GET'])
def list_view() -> object:
return views.messages_list_view()
# A route to get message details.
@app.route(f'/{API_URL}/<pk>', methods=['GET'])
def detail(pk) -> object:
return views.message_detail_view(pk)
# A route to create a new message.
@app.route(f'/{API_URL}', methods=['POST'])
def create() -> object:
return views.message_create_view()
# A route to update an existing message.
@app.route(f'/{API_URL}/<pk>', methods=['PATCH'])
def update(pk) -> object:
return views.message_update_view(pk)
# A route to delete message.
@app.route(f'/{API_URL}/<pk>', methods=['DELETE'])
def delete(pk) -> object:
return views.message_delete_view(pk)
if __name__ == '__main__':
app.run()
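# --- Usage sketch (illustrative; the payload shapes are assumptions, since
# config.py and messages.views are not shown) ---
#
#   with app.test_client() as client:
#       client.post(f"/{API_URL}", json={"text": "hello"})   # create()
#       client.get(f"/{API_URL}")                            # list_view()
#       client.patch(f"/{API_URL}/1", json={"text": "hi"})   # update(pk=1)
#       client.delete(f"/{API_URL}/1")                       # delete(pk=1)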
hexsha: 7c05eab5ae1a593041a03bca3400a7e42841bb0f | size: 578 | lang: Python | path: Python/test.py | repo: zharmedia386/Data-Science-Stuff | license: MIT
Barang = ['kunci','ember','jaket','ban','mobil']
print(Barang)
# several methods that can be used to manipulate a list
# methods for adding data to the list
Barang.append('sepeda')
print(Barang)
Barang.extend('dompet')  # note: extend() iterates the string, adding each character separately
print(Barang)
Barang.insert(3,'sepeda')
print(Barang)
# method for counting occurrences of an element
jumlahSepeda = Barang.count('sepeda')
print("Jumlah sepeda adalah: ",jumlahSepeda)
# removing data from the list
Barang.remove('sepeda')
print(Barang)
Barang.reverse()
print(Barang)
print("="*100)
Stuff = Barang.copy()
Stuff.append('gelas')
print(Stuff)
print(Barang)
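# Illustration of the append/extend difference seen above: extend() iterates
# its argument, so a bare string is split into single characters.
sample = ['a']
sample.append('xy')    # adds one element        -> ['a', 'xy']
sample.extend('xy')    # adds each character     -> ['a', 'xy', 'x', 'y']
sample.extend(['xy'])  # wrap in a list instead  -> ['a', 'xy', 'x', 'y', 'xy']
print(sample)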
hexsha: 41e139126f2fe37db90ac8bad4ad65b6608e295a | size: 41316 | lang: Python | path: dask/dataframe/tests/test_shuffle.py | repo: eric-bonfadini/dask | license: BSD-3-Clause
import itertools
import multiprocessing as mp
import os
import pickle
import random
import string
import tempfile
from concurrent.futures import ProcessPoolExecutor
from copy import copy
from functools import partial
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import dask
import dask.dataframe as dd
from dask import delayed
from dask.base import compute_as_if_collection
from dask.dataframe._compat import PANDAS_GT_120, assert_categorical_equal, tm
from dask.dataframe.shuffle import (
maybe_buffered_partd,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
remove_nans,
shuffle,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.optimization import cull
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
meta = make_meta(
{"a": "i8", "b": "i8"}, index=pd.Index([], "i8"), parent_meta=pd.DataFrame()
)
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
shuffle_func = shuffle # conflicts with keyword argument
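# For intuition (a sketch, not one of the test cases below): shuffle(d, d.b)
# redistributes rows across partitions by hashing column "b", so rows with
# equal keys land in the same output partition -- exactly the disjointness
# property asserted in test_shuffle.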
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_shuffle(shuffle):
s = shuffle_func(d, d.b, shuffle=shuffle)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == d.npartitions
x = dask.get(s.dask, (s._name, 0))
y = dask.get(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert set(s.dask).issuperset(d.dask)
assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_shuffle_npartitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=10)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=17, max_branch=4)
sc = s.compute()
assert s.npartitions == 17
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
def test_shuffle_npartitions_lt_input_partitions_task():
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=20)
s = shuffle(ddf, ddf.x, shuffle="tasks", npartitions=5, max_branch=2)
sc = s.compute()
assert s.npartitions == 5
assert set(s.dask).issuperset(set(ddf.dask))
assert len(sc) == len(df)
assert list(s.columns) == list(df.columns)
assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_non_series(method):
from dask.dataframe.tests.test_multi import list_eq
list_eq(shuffle(d, d.b, shuffle=method), shuffle(d, "b", shuffle=method))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_index_with_dataframe(method):
res1 = shuffle(d, d[["b"]], shuffle=method).compute()
res2 = shuffle(d, ["b"], shuffle=method).compute()
res3 = shuffle(d, "b", shuffle=method).compute()
assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())
assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_from_one_partition_to_one_other(method):
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, "x", npartitions=i, shuffle=method)
assert len(a.compute(scheduler="sync")) == len(b.compute(scheduler="sync"))
@pytest.mark.parametrize("method", ["disk", "tasks"])
def test_shuffle_empty_partitions(method):
df = pd.DataFrame({"x": [1, 2, 3] * 10})
ddf = dd.from_pandas(df, npartitions=3)
s = shuffle(ddf, ddf.x, npartitions=6, shuffle=method)
parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())
for p in parts:
assert s.columns == p.columns
df2 = pd.DataFrame(
{
"i32": np.array([1, 2, 3] * 3, dtype="int32"),
"f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
"cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
"obj": pd.Series(["d", "e", "f"] * 3),
"bool": np.array([True, False, True] * 3),
"dt": pd.Series(pd.date_range("20130101", periods=9)),
"dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
"td": pd.Series(pd.timedelta_range("2000", periods=9)),
}
)
def test_partitioning_index():
res = partitioning_index(df2.i32, 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()
res = partitioning_index(df2[["i32"]], 3)
assert ((res < 3) & (res >= 0)).all()
assert len(np.unique(res)) > 1
res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
assert ((0 <= res) & (res < 2)).all()
res = partitioning_index(df2.index, 4)
assert ((res < 4) & (res >= 0)).all()
assert len(np.unique(res)) > 1
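# For intuition (a sketch, not a test): partitioning_index hashes each row of
# its input and maps it to an integer in [0, npartitions), e.g.
#   partitioning_index(pd.Series([1, 2, 3]), 3)  # -> values drawn from {0, 1, 2}
# Equal inputs always receive the same partition id, which is what makes the
# hash shuffle deterministic.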
def test_partitioning_index_categorical_on_values():
df = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
df.a = df.a.astype("category")
df2 = df.copy()
df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))
res = partitioning_index(df.a, 5)
res2 = partitioning_index(df2.a, 5)
assert (res == res2).all()
res = partitioning_index(df, 5)
res2 = partitioning_index(df2, 5)
assert (res == res2).all()
@pytest.mark.parametrize(
"npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_tasks(npartitions):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
assert_eq(df.set_index("x"), ddf.set_index("x", shuffle="tasks"))
assert_eq(df.set_index("y"), ddf.set_index("y", shuffle="tasks"))
assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle="tasks"))
assert_eq(df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle="tasks"))
assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 1, shuffle="tasks"))
assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle="tasks"))
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_self_index(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
a = dd.from_pandas(df, npartitions=4)
b = a.set_index(a.index, shuffle=shuffle)
assert a is b
assert_eq(b, df.set_index(df.index))
@pytest.mark.parametrize("shuffle", ["tasks"])
def test_set_index_names(shuffle):
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
assert set(ddf.set_index("x", shuffle=shuffle).dask) == set(
ddf.set_index("x", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", shuffle=shuffle).dask) != set(
ddf.set_index("y", shuffle=shuffle).dask
)
assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle).dask) != set(
ddf.set_index("x", max_branch=3, shuffle=shuffle).dask
)
assert set(ddf.set_index("x", drop=True, shuffle=shuffle).dask) != set(
ddf.set_index("x", drop=False, shuffle=shuffle).dask
)
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_2(shuffle):
df = dd.demo.make_timeseries(
"2000",
"2004",
{"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
df2 = df.set_index("name", shuffle=shuffle)
df2.value.sum().compute(scheduler="sync")
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_tasks_3(shuffle):
df = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, max_branch=2, npartitions=ddf.npartitions
)
df2 = df.set_index("x")
assert_eq(df2, ddf2)
assert ddf2.npartitions == ddf.npartitions
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
def test_shuffle_sort(shuffle):
df = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
ddf = dd.from_pandas(df, npartitions=3)
df2 = df.set_index("x").sort_index()
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert_eq(ddf2.loc[2:3], df2.loc[2:3])
@pytest.mark.parametrize("shuffle", ["tasks", "disk"])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle, scheduler):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle=shuffle)
assert result.npartitions == ddf.npartitions
assert set(ddf.dask).issubset(result.dask)
# Every value in exactly one partition
a = result.compute(scheduler=scheduler)
get = dask.base.get_scheduler(scheduler=scheduler)
parts = get(result.dask, result.__dask_keys__())
for i in a._partitions.drop_duplicates():
assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
    with dask.config.set(temporary_directory=str(tmpdir)):
result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
raise ValueError("Mock exception!")
def test_rearrange_disk_cleanup_with_exception():
# ensure temporary files are cleaned up when there's an internal exception.
with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
df = pd.DataFrame({"x": np.random.random(10)})
ddf = dd.from_pandas(df, npartitions=4)
ddf2 = ddf.assign(_partitions=ddf.x % 4)
tmpdir = tempfile.mkdtemp()
        with dask.config.set(temporary_directory=str(tmpdir)):
with pytest.raises(ValueError, match="Mock exception!"):
result = rearrange_by_column(
ddf2, "_partitions", max_branch=32, shuffle="disk"
)
result.compute(scheduler="processes")
assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
from dask.dataframe.tests.test_multi import list_eq
A = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
df = rearrange_by_divisions(a, "x", (0, 2, 5))
list_eq(df, a)
def test_maybe_buffered_partd():
import partd
f = maybe_buffered_partd()
p1 = f()
assert isinstance(p1.partd, partd.Buffer)
f2 = pickle.loads(pickle.dumps(f))
assert not f2.buffer
p2 = f2()
assert isinstance(p2.partd, partd.File)
def test_set_index_with_explicit_divisions():
df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2)
def throw(*args, **kwargs):
raise Exception()
with dask.config.set(get=throw):
ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
assert ddf2.divisions == (1, 3, 5)
df2 = df.set_index("x")
assert_eq(ddf2, df2)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index("y", divisions=["a", "c", "d"])
assert result.divisions == ("a", "c", "d")
assert list(result.compute(scheduler="sync").index[-2:]) == ["d", "d"]
def test_set_index_divisions_compute():
d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index("b"))
assert_eq(d3, full.set_index("b"))
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
ddf = dd.DataFrame(
{("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
)
df = ddf.compute()
def throw(*args, **kwargs):
raise Exception("Shouldn't have computed")
with dask.config.set(get=throw):
res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
assert_eq(res, df.set_index("x"))
with dask.config.set(get=throw):
res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
assert_eq(res, df.set_index("y"))
# with sorted=True, divisions must be same length as df.divisions
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
# Divisions must be sorted
with pytest.raises(ValueError):
ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
# See https://github.com/dask/dask/issues/3867
df = pd.DataFrame(
{"x": np.random.random(100), "y": np.random.random(100) // 0.2},
index=np.random.random(100),
)
ddf = dd.from_pandas(df, npartitions=4)
ddf = ddf.clear_divisions()
ctx = mp.get_context("spawn")
with ProcessPoolExecutor(8, ctx) as pool:
func = partial(_set_index, df=ddf, idx="x")
divisions_set = set(pool.map(func, range(100)))
assert len(divisions_set) == 1
def _set_index(i, df, idx):
return df.set_index(idx).divisions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_small(shuffle):
df = pd.DataFrame({"x": np.random.random(100)})
ddf = dd.from_pandas(df, npartitions=50)
ddf2 = ddf.set_index("x", shuffle=shuffle, npartitions="auto")
assert ddf2.npartitions < 10
def make_part(n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_reduces_partitions_large(shuffle):
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert 1 < ddf2.npartitions < 20
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_doesnt_increase_partitions(shuffle):
nparts = 2
nbytes = 1e6
n = int(nbytes / (nparts * 8))
ddf = dd.DataFrame(
{("x", i): (make_part, n) for i in range(nparts)},
"x",
make_part(1),
[None] * (nparts + 1),
)
ddf2 = ddf.set_index(
"x", shuffle=shuffle, npartitions="auto", partition_size=nbytes
)
assert ddf2.npartitions <= ddf.npartitions
@pytest.mark.parametrize("shuffle", ["disk", "tasks"])
def test_set_index_detects_sorted_data(shuffle):
df = pd.DataFrame({"x": range(100), "y": range(100)})
ddf = dd.from_pandas(df, npartitions=10, name="x", sort=False)
ddf2 = ddf.set_index("x", shuffle=shuffle)
assert len(ddf2.dask) < ddf.npartitions * 4
def test_set_index_sorts():
# https://github.com/dask/dask/issues/2288
vals = np.array(
[
1348550149000000000,
1348550149000000000,
1348558142000000000,
1348558142000000000,
1348585928000000000,
1348585928000000000,
1348600739000000000,
1348601706000000000,
1348600739000000000,
1348601706000000000,
1348614789000000000,
1348614789000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348621037000000000,
1348621038000000000,
1348621040000000000,
1348637628000000000,
1348638159000000000,
1348638160000000000,
1348638159000000000,
1348638160000000000,
1348637628000000000,
1348646354000000000,
1348646354000000000,
1348659107000000000,
1348657111000000000,
1348659107000000000,
1348657111000000000,
1348672876000000000,
1348672876000000000,
1348682787000000000,
1348681985000000000,
1348682787000000000,
1348681985000000000,
1348728167000000000,
1348728167000000000,
1348730745000000000,
1348730745000000000,
1348750198000000000,
1348750198000000000,
1348750198000000000,
1348753539000000000,
1348753539000000000,
1348753539000000000,
1348754449000000000,
1348754449000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348761333000000000,
1348761554000000000,
1348761610000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
1348782624000000000,
]
)
vals = pd.to_datetime(vals, unit="ns")
breaks = [10, 36, 58]
dfs = []
for i in range(len(breaks)):
lo = sum(breaks[:i])
hi = sum(breaks[i : i + 1])
dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
ddf = dd.concat(dfs).clear_divisions()
assert ddf.set_index("timestamp").index.compute().is_monotonic is True
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
dask_cudf = pytest.importorskip("dask_cudf")
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
}
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
if engine == "cudf":
d = dask_cudf.from_dask_dataframe(d)
full = d.compute()
d2 = d.set_index("b", npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == "b"
assert_eq(d2, full.set_index("b"))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == "b"
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index("b")
assert d4.index.name == "b"
assert_eq(d4, full.set_index("b"))
d5 = d.set_index(["b"])
assert d5.index.name == "b"
assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=3)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == {1, 2, 4}
d2 = d.set_index("y", npartitions=3)
assert d2.divisions[0] == 1.0
assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
assert d2.divisions[3] == 2.0
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_int(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({"x": 2 * L})
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 2)
d1 = d.set_index("x", npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize(
"engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_large_uint(engine):
if engine == "cudf":
# NOTE: engine == "cudf" requires cudf/dask_cudf,
# will be skipped by non-GPU CI.
cudf = pytest.importorskip("cudf")
dask_cudf = pytest.importorskip("dask_cudf")
"""This test is for #7304"""
df = pd.DataFrame(
{"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
)
if engine == "cudf":
gdf = cudf.from_pandas(df)
d = dask_cudf.from_cudf(gdf, npartitions=2)
else:
d = dd.from_pandas(df, 1)
d1 = d.set_index("x", npartitions=1)
assert d1.npartitions == 1
assert set(d1.divisions) == {612509347682975743, 616762138058293247}
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range("20130101", periods=3))
s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index("notz", npartitions=1)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index("tz", npartitions=1)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
if PANDAS_GT_120:
# starting with pandas 1.2.0, comparing equality of timestamps with different
# timezones returns False instead of raising an error
assert not d2.divisions[0] == s2badtype[0]
else:
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
# https://github.com/dask/dask/issues/6974
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"]
)
)
data = dd.from_pandas(data, npartitions=2)
output = data.reset_index().set_index("index", npartitions=1)
assert output.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
# https://github.com/dask/dask/issues/6864
df = pd.DataFrame(
[
[1567703791155681, 1],
[1567703792155681, 2],
[1567703790155681, 0],
[1567703793155681, 3],
],
columns=["ts", "rank"],
)
df.ts = pd.to_datetime(df.ts, unit=unit)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.set_index("ts")
assert_eq(ddf, df.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame(
{
"A": list("ABAABBABAA"),
"B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame(
{
0: list("ABAABBABAA"),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
}
)
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with pytest.raises(NotImplementedError) as err:
ddf.set_index(["a", "b"])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a", "b"]])
assert msg in str(err.value)
with pytest.raises(NotImplementedError) as err:
ddf.set_index([["a"]])
assert msg in str(err.value)
def test_set_index_sorted_true():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index("x", sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
assert_eq(
a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
)
assert_eq(
a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop),
)
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index("x", sorted=True), df.set_index("x"))
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index("y", sorted=True)
assert df2.divisions == (0, 1, 1)
def test_set_index_empty_partition():
test_vals = [1, 2, 3]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for conv in converters:
df = pd.DataFrame(
[{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
)
ddf = dd.concat(
[
dd.from_pandas(df, npartitions=1),
dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
]
)
assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
test_vals = [1, 2, 3, 4]
converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
for converter in converters:
df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
ddf = dd.from_pandas(df, npartitions=4)
assert ddf.npartitions > 1
ddf = ddf[ddf.y > df.y.max()].set_index("x")
expected_df = df[df.y > df.y.max()].set_index("x")
assert assert_eq(ddf, expected_df, **CHECK_FREQ)
assert ddf.npartitions == 1
def test_set_index_categorical():
# https://github.com/dask/dask/issues/5671
order = list(reversed(string.ascii_letters))
values = list(string.ascii_letters)
random.shuffle(values)
dtype = pd.api.types.CategoricalDtype(order, ordered=True)
df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
result = dd.from_pandas(df, npartitions=2).set_index("A")
assert len(result) == len(df)
# sorted with the metric defined by the Categorical
divisions = pd.Categorical(result.divisions, dtype=dtype)
assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
from dask.dataframe.shuffle import compute_and_set_divisions
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
index=[1, 3, 10, 20],
)
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = compute_and_set_divisions(copy(a))
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_empty_partitions():
# See https://github.com/dask/dask/issues/2408
df = pd.DataFrame({"a": list(range(10))})
df["b"] = df["a"] % 3
df["c"] = df["b"].astype(str)
ddf = dd.from_pandas(df, npartitions=3)
ddf = ddf.set_index("b")
ddf = ddf.repartition(npartitions=3)
ddf.get_partition(0).compute()
assert_eq(ddf, df.set_index("b"))
ddf = ddf.set_index("c")
assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
tests = [
((1, 1, 2), (1, 1, 2)),
((None, 1, 2), (1, 1, 2)),
((1, None, 2), (1, 2, 2)),
((1, 2, None), (1, 2, 2)),
((1, 2, None, None), (1, 2, 2, 2)),
((None, None, 1, 2), (1, 1, 1, 2)),
((1, None, None, 2), (1, 2, 2, 2)),
((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
]
converters = [
(int, np.nan),
(float, np.nan),
(str, np.nan),
(lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
]
for conv, none_val in converters:
for inputs, expected in tests:
params = [none_val if x is None else conv(x) for x in inputs]
expected = [conv(x) for x in expected]
assert remove_nans(params) == expected
@pytest.mark.slow
def test_gh_2730():
large = pd.DataFrame({"KEY": np.arange(0, 50000)})
small = pd.DataFrame({"KEY": np.arange(25, 500)})
dd_left = dd.from_pandas(small, npartitions=3)
dd_right = dd.from_pandas(large, npartitions=257)
with dask.config.set(shuffle="tasks", scheduler="sync"):
dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
result = dd_merged.compute()
expected = large.merge(small, how="inner", on="KEY")
tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
# Atomic counter
count = itertools.count()
def increment():
next(count)
def make_part(dummy, n):
return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})
nbytes = 1e6
nparts = 50
n = int(nbytes / (nparts * 8))
dsk = {("inc", i): (increment,) for i in range(nparts)}
dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
ddf.set_index("x", npartitions=npartitions)
ntimes = next(count)
assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
df = pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.set_index("a")
with pytest.raises(NotImplementedError):
ddf.set_index("a", inplace=True)
def test_set_index_timestamp():
df = pd.DataFrame({"A": pd.date_range("2000", periods=12, tz="US/Central"), "B": 1})
ddf = dd.from_pandas(df, 2)
divisions = (
pd.Timestamp("2000-01-01 00:00:00-0600", tz="US/Central"),
pd.Timestamp("2000-01-12 00:00:00-0600", tz="US/Central"),
)
# Note: `freq` is lost during round trip
df2 = df.set_index("A")
ddf_new_div = ddf.set_index("A", divisions=divisions)
for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):
assert ts1.value == ts2.value
assert ts1.tz == ts2.tz
assert_eq(df2, ddf_new_div, **CHECK_FREQ)
assert_eq(df2, ddf.set_index("A"), **CHECK_FREQ)
@pytest.mark.parametrize("compression", [None, "ZLib"])
def test_disk_shuffle_with_compression_option(compression):
# test if dataframe shuffle works both with and without compression
with dask.config.set({"dataframe.shuffle-compression": compression}):
test_shuffle("disk")
@pytest.mark.parametrize("compression", ["UNKOWN_COMPRESSION_ALGO"])
def test_disk_shuffle_with_unknown_compression(compression):
# test if dask raises an error in case of fault config string
with dask.config.set({"dataframe.shuffle-compression": compression}):
with pytest.raises(
ImportError,
match=(
"Not able to import and load {} as compression algorithm."
"Please check if the library is installed and supported by Partd.".format(
compression
)
),
):
test_shuffle("disk")
def test_disk_shuffle_check_actual_compression():
# test if the compression switch is really respected by testing the size of the actual partd-data on disk
def generate_raw_partd_file(compression):
# generate and write a dummy dataframe to disk and return the raw data bytes
df1 = pd.DataFrame({"a": list(range(10000))})
df1["b"] = (df1["a"] * 123).astype(str)
with dask.config.set({"dataframe.shuffle-compression": compression}):
p1 = maybe_buffered_partd(buffer=False, tempdir=None)()
p1.append({"x": df1})
# get underlying filename from partd - depending on nested structure of partd object
filename = (
p1.partd.partd.filename("x") if compression else p1.partd.filename("x")
)
with open(filename, "rb") as f:
return f.read()
# get compressed and uncompressed raw data
uncompressed_data = generate_raw_partd_file(compression=None)
compressed_data = generate_raw_partd_file(compression="BZ2")
assert len(uncompressed_data) > len(compressed_data)
@pytest.mark.parametrize("ignore_index", [None, True, False])
@pytest.mark.parametrize(
"on", ["id", "name", ["id", "name"], pd.Series(["id", "name"])]
)
@pytest.mark.parametrize("max_branch", [None, 4])
def test_dataframe_shuffle_on_tasks_api(on, ignore_index, max_branch):
# Make sure DataFrame.shuffle API returns the same result
# whether the ``on`` argument is a list of column names,
# or a separate DataFrame with equivalent values...
df_in = dask.datasets.timeseries(
"2000",
"2001",
types={"value": float, "name": str, "id": int},
freq="2H",
partition_freq="1M",
seed=1,
)
if isinstance(on, str):
ext_on = df_in[[on]].copy()
else:
ext_on = df_in[on].copy()
df_out_1 = df_in.shuffle(
on, shuffle="tasks", ignore_index=ignore_index, max_branch=max_branch
)
df_out_2 = df_in.shuffle(ext_on, shuffle="tasks", ignore_index=ignore_index)
assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))
if ignore_index:
assert df_out_1.index.dtype != df_in.index.dtype
else:
assert df_out_1.index.dtype == df_in.index.dtype
def test_set_index_overlap():
A = pd.DataFrame({"key": [1, 2, 3, 4, 4, 5, 6, 7], "value": list("abcd" * 2)})
a = dd.from_pandas(A, npartitions=2)
a = a.set_index("key", sorted=True)
b = a.repartition(divisions=a.divisions)
assert_eq(a, b)
def test_set_index_overlap_2():
data = pd.DataFrame(
index=pd.Index(
["A", "A", "A", "A", "A", "A", "A", "A", "A", "B", "B", "B", "C"],
name="index",
)
)
ddf1 = dd.from_pandas(data, npartitions=2)
ddf2 = ddf1.reset_index().repartition(8).set_index("index", sorted=True)
assert_eq(ddf1, ddf2)
assert ddf2.npartitions == 8
def test_shuffle_hlg_layer():
# This test checks that the `ShuffleLayer` HLG Layer
# is used (as expected) for a multi-stage shuffle.
ddf = dd.from_pandas(
pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=10
)
ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]
# Cull the HLG
dsk = ddf_shuffled.__dask_graph__()
dsk_culled = dsk.cull(set(keys))
assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)
# Ensure we have ShuffleLayers
assert any(
isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()
)
# Check that the ShuffleLayers are non-materialized
for layer in dsk.layers.values():
if isinstance(layer, dd.shuffle.ShuffleLayer):
assert not hasattr(layer, "_cached_dict")
# Make sure HLG culling reduces the graph size
assert len(dsk_culled) < len(dsk)
# Check ShuffleLayer names
for name, layer in dsk.layers.items():
if isinstance(layer, dd.shuffle.ShuffleLayer):
assert name.startswith("shuffle-")
# Since we already culled the HLG,
# culling the dictionary should not change the graph
dsk_dict = dict(dsk_culled)
dsk_dict_culled, _ = cull(dsk_dict, keys)
assert dsk_dict_culled == dsk_dict
@pytest.mark.parametrize(
"npartitions",
[
10, # ShuffleLayer
1, # SimpleShuffleLayer
],
)
def test_shuffle_hlg_layer_serialize(npartitions):
ddf = dd.from_pandas(
pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=npartitions
)
ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
# Ensure shuffle layers can be serialized and don't result in
# the underlying low-level graph being materialized
dsk = ddf_shuffled.__dask_graph__()
for layer in dsk.layers.values():
if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):
continue
assert not hasattr(layer, "_cached_dict")
layer_roundtrip = pickle.loads(pickle.dumps(layer))
assert type(layer_roundtrip) == type(layer)
assert not hasattr(layer_roundtrip, "_cached_dict")
assert layer_roundtrip.keys() == layer.keys()
def test_set_index_nan_partition():
d[d.a > 3].set_index("a") # Set index with 1 null partition
d[d.a > 1].set_index("a", sorted=True) # Set sorted index with 0 null partitions
a = d[d.a > 3].set_index("a", sorted=True) # Set sorted index with 1 null partition
assert_eq(a, a)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nelem", [10, 500])
@pytest.mark.parametrize("nparts", [1, 10])
def test_sort_values(nelem, nparts, by, ascending):
np.random.seed(0)
df = pd.DataFrame()
df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
df["b"] = np.arange(100, nelem + 100)
ddf = dd.from_pandas(df, npartitions=nparts)
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=by, ascending=ascending)
expect = df.sort_values(by=by, ascending=ascending)
dd.assert_eq(got, expect, check_index=False)
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nparts", [1, 5])
@pytest.mark.parametrize(
"data",
[
{
"a": list(range(50)) + [None] * 50 + list(range(50, 100)),
"b": [None] * 100 + list(range(100, 150)),
},
{"a": list(range(15)) + [None] * 5, "b": list(reversed(range(20)))},
],
)
def test_sort_values_with_nulls(data, nparts, by, ascending, na_position):
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=nparts)
with dask.config.set(scheduler="single-threaded"):
got = ddf.sort_values(by=by, ascending=ascending, na_position=na_position)
expect = df.sort_values(by=by, ascending=ascending, na_position=na_position)
dd.assert_eq(got, expect, check_index=False)
hexsha: 35f0bdd233393ce0f6f0ad6fae553ccafa610e27 | size: 4350 | lang: Python | path: cryspy/A_functions_base/preferred_orientation.py | repo: ikibalin/rhochi | license: MIT
import numpy
from .unit_cell import calc_eq_ccs_by_unit_cell_parameters
from .powder_diffraction_const_wavelength import calc_gamma_nu_by_ttheta_phi
def calc_cos_ang(cell, h_1, k_1, l_1, h_2, k_2, l_2):
"""Calculate directed cosines."""
q_1_x, q_1_y, q_1_z = cell.calc_k_loc(h_1, k_1, l_1)
q_2_x, q_2_y, q_2_z = cell.calc_k_loc(h_2, k_2, l_2)
q_1_sq = q_1_x*q_1_x + q_1_y*q_1_y + q_1_z*q_1_z
q_2_sq = q_2_x*q_2_x + q_2_y*q_2_y + q_2_z*q_2_z
q_12 = q_1_x*q_2_x + q_1_y*q_2_y + q_1_z*q_2_z
res = q_12/(q_1_sq*q_2_sq)**0.5
res[res > 1.] = 1.
return res
na = numpy.newaxis
def calc_preferred_orientation_pd(
index_hkl, texture_g1, texture_g2, texture_axis, unit_cell_parameters,
flag_texture_g1: bool = False, flag_texture_g2: bool = False,
flag_texture_axis: bool = False, flag_unit_cell_parameters: bool = False):
"""Preferred orintation by Modified March-Dollas model.
"""
preferred_orientation = None
eq_axis_ccs, dder_eq_axis_ccs = calc_eq_ccs_by_unit_cell_parameters(
texture_axis, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
eq_hkl_ccs, dder_eq_hkl_ccs = calc_eq_ccs_by_unit_cell_parameters(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
cos_alpha_ax = (eq_axis_ccs[:, 0][:, na] * eq_hkl_ccs).sum(axis=0)
sin_alpha_ax_sq = numpy.abs(1. - numpy.square(cos_alpha_ax))
cos_alpha_sq = sin_alpha_ax_sq
hh = 1./texture_g1 + (texture_g1**2 - 1./texture_g1) * cos_alpha_sq
preferred_orientation = texture_g2 + (1. - texture_g2) * numpy.power(hh, -1.5)
dder_po = {}
if flag_texture_g2:
dder_po["texture_g2"] = 1. - numpy.power(hh, -1.5)
if flag_texture_g1:
dder_po["texture_g1"] = -1.5*(1. - texture_g2) * numpy.power(hh, -2.5) * \
(- (1.-cos_alpha_sq)/numpy.square(texture_g1) + 2*cos_alpha_sq*texture_g1)
return preferred_orientation, dder_po
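# The expression above is the March-Dollase profile: with c = cos^2(alpha),
#   hh = 1/G1 + (G1**2 - 1/G1)*c = G1**2 * cos^2(alpha) + sin^2(alpha)/G1,
# so preferred_orientation = G2 + (1 - G2) * hh**(-3/2), with texture_g2
# acting as the untextured fraction of the sample.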
def calc_preferred_orientation_pd2d(alpha_det,
index_hkl, texture_g1, texture_g2, texture_axis, unit_cell_parameters,
flag_texture_g1: bool = False, flag_texture_g2: bool = False,
flag_texture_axis: bool = False, flag_unit_cell_parameters: bool = False):
"""Preferred orintation by Modified March-Dollas model.
"""
preferred_orientation = None
eq_axis_ccs, dder_eq_axis_ccs = calc_eq_ccs_by_unit_cell_parameters(
texture_axis, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
eq_hkl_ccs, dder_eq_hkl_ccs = calc_eq_ccs_by_unit_cell_parameters(
index_hkl, unit_cell_parameters, flag_unit_cell_parameters=flag_unit_cell_parameters)
cos_alpha_ax = (eq_axis_ccs[:, 0][:, na] * eq_hkl_ccs).sum(axis=0)
cos_alpha_ax[cos_alpha_ax > 1.] = 1.
c_help = 1.-cos_alpha_ax**2
c_help[c_help < 0.] = 0.
sin_alpha_ax = numpy.sqrt(c_help)
cos_alpha_det = numpy.cos(alpha_det)
sin_alpha_det = numpy.sin(alpha_det)
cos_alpha = cos_alpha_ax[na, na,:] * cos_alpha_det[:,:,na] + sin_alpha_ax[na, na,:] * sin_alpha_det[:,:,na]
cos_alpha_sq = numpy.square(cos_alpha)
hh = 1./texture_g1 + (texture_g1**2 - 1./texture_g1) * cos_alpha_sq
preferred_orientation = texture_g2 + (1. - texture_g2) * numpy.power(hh, -1.5)
dder_po = {}
if flag_texture_g2:
dder_po["texture_g2"] = 1. - numpy.power(hh, -1.5)
if flag_texture_g1:
dder_po["texture_g1"] = -1.5*(1. - texture_g2) * numpy.power(hh, -2.5) * \
(- (1.-cos_alpha_sq)/numpy.square(texture_g1) + 2*cos_alpha_sq*texture_g1)
return preferred_orientation, dder_po
def calc_gamma_nu_for_textured_peaks(eq_axis, eq_ccs, ttheta_hkl, texture_g1):
c_csc = numpy.sum(eq_ccs*eq_axis, axis=0)
inv_c_h = 1./numpy.cos(0.5 * ttheta_hkl)
if texture_g1 <= 1:
s_phi = inv_c_h*c_csc
s_phi[s_phi>1] = 1.
s_phi[s_phi<-1] = -1.
phi_max = numpy.arcsin(s_phi)
else:
s_csc = numpy.sqrt(1.-numpy.square(c_csc))
s_csc[s_csc>1.] = 1.
s_phi = inv_c_h*s_csc
s_phi[s_phi>1] = 1.
s_phi[s_phi<-1] = -1.
phi_max = numpy.arcsin(s_phi)
gamma_hkl, nu_hkl = calc_gamma_nu_by_ttheta_phi(
ttheta_hkl, phi_max, flag_ttheta=False, flag_phi=False)[:2]
return gamma_hkl, nu_hkl
hexsha: 1c64b45aba7723e9e035375f4949b3a95ca6a32a | size: 25091 | lang: Python | path: vnpy/gateway/minitest/minitest_gateway.py | repo: wenhaoLong/vnpyTrader | license: MIT | forks: 2
"""
"""
from datetime import datetime
from vnpy.api.mini import (
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
from vnpy.trader.utility import get_folder_path
from vnpy.trader.event import EVENT_TIMER
from .vnminimd import MdApi
from .vnminitd import TdApi
STATUS_MINI2VT = {
THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
THOST_FTDC_OAS_Rejected: Status.REJECTED,
THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
THOST_FTDC_OST_Canceled: Status.CANCELLED
}
DIRECTION_VT2MINI = {
Direction.LONG: THOST_FTDC_D_Buy,
Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_MINI2VT = {v: k for k, v in DIRECTION_VT2MINI.items()}
DIRECTION_MINI2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_MINI2VT[THOST_FTDC_PD_Short] = Direction.SHORT
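# Note: the reverse map is extended with position-direction constants
# (THOST_FTDC_PD_Long/Short) so that both order directions and position
# directions decode to the same vn.py Direction enum.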
ORDERTYPE_VT2MINI = {
OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_MINI2VT = {v: k for k, v in ORDERTYPE_VT2MINI.items()}
OFFSET_VT2MINI = {
Offset.OPEN: THOST_FTDC_OF_Open,
Offset.CLOSE: THOST_FTDC_OFEN_Close,
Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_MINI2VT = {v: k for k, v in OFFSET_VT2MINI.items()}
EXCHANGE_MINI2VT = {
"CFFEX": Exchange.CFFEX,
"SHFE": Exchange.SHFE,
"CZCE": Exchange.CZCE,
"DCE": Exchange.DCE,
"INE": Exchange.INE
}
PRODUCT_MINI2VT = {
THOST_FTDC_PC_Futures: Product.FUTURES,
THOST_FTDC_PC_Options: Product.OPTION,
THOST_FTDC_PC_Combination: Product.SPREAD
}
OPTIONTYPE_MINI2VT = {
THOST_FTDC_CP_CallOptions: OptionType.CALL,
THOST_FTDC_CP_PutOptions: OptionType.PUT
}
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
class MinitestGateway(BaseGateway):
"""
VN Trader Gateway for CTP Mini.
"""
default_setting = {
"用户名": "",
"密码": "",
"经纪商代码": "",
"交易服务器": "",
"行情服务器": "",
"产品名称": "",
"授权编码": "",
"产品信息": ""
}
exchanges = list(EXCHANGE_MINI2VT.values())
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "MINITEST")
self.td_api = MiniTdApi(self)
self.md_api = MiniMdApi(self)
def connect(self, setting: dict):
""""""
userid = setting["用户名"]
password = setting["密码"]
brokerid = setting["经纪商代码"]
td_address = setting["交易服务器"]
md_address = setting["行情服务器"]
appid = setting["产品名称"]
auth_code = setting["授权编码"]
product_info = setting["产品信息"]
if not td_address.startswith("tcp://"):
td_address = "tcp://" + td_address
if not md_address.startswith("tcp://"):
md_address = "tcp://" + md_address
self.td_api.connect(td_address, userid, password, brokerid, auth_code, appid, product_info)
self.md_api.connect(md_address, userid, password, brokerid)
self.init_query()
def subscribe(self, req: SubscribeRequest):
""""""
self.md_api.subscribe(req)
def send_order(self, req: OrderRequest):
""""""
return self.td_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
self.td_api.cancel_order(req)
def query_account(self):
""""""
self.td_api.query_account()
def query_position(self):
""""""
self.td_api.query_position()
def close(self):
""""""
self.td_api.close()
self.md_api.close()
def write_error(self, msg: str, error: dict):
""""""
error_id = error["ErrorID"]
error_msg = error["ErrorMsg"]
msg = f"{msg},代码:{error_id},信息:{error_msg}"
self.write_log(msg)
def process_timer_event(self, event):
""""""
self.count += 1
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
def init_query(self):
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
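    # Note: vn.py's EventEngine emits EVENT_TIMER roughly once per second, so
    # with the count guard in process_timer_event each entry in
    # query_functions runs about once every 2 * len(query_functions) seconds,
    # alternating between account and position queries.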
class MiniMdApi(MdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(MiniMdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.connect_status = False
self.login_status = False
self.subscribed = set()
self.userid = ""
self.password = ""
self.brokerid = ""
def onFrontConnected(self):
"""
Callback when front server is connected.
"""
self.gateway.write_log("行情服务器连接成功")
self.login()
def onFrontDisconnected(self, reason: int):
"""
Callback when front server is disconnected.
"""
self.login_status = False
self.gateway.write_log(f"行情服务器连接断开,原因{reason}")
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback when user is logged in.
"""
if not error["ErrorID"]:
self.login_status = True
self.gateway.write_log("行情服务器登录成功")
for symbol in self.subscribed:
self.subscribeMarketData(symbol)
else:
self.gateway.write_error("行情服务器登录失败", error)
def onRspError(self, error: dict, reqid: int, last: bool):
"""
        Callback when an error occurred.
"""
self.gateway.write_error("行情接口报错", error)
def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error or not error["ErrorID"]:
return
self.gateway.write_error("行情订阅失败", error)
def onRtnDepthMarketData(self, data: dict):
"""
Callback of tick data update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
return
timestamp = f"{data['ActionDay']} {data['UpdateTime']}.{int(data['UpdateMillisec']/100)}"
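        # UpdateMillisec is in milliseconds; keeping only the tenths digit is
        # enough for tick timestamps, and strptime's %f right-pads it back to
        # microseconds.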
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.strptime(timestamp, "%Y%m%d %H:%M:%S.%f"),
name=symbol_name_map[symbol],
volume=data["Volume"],
open_interest=data["OpenInterest"],
last_price=data["LastPrice"],
limit_up=data["UpperLimitPrice"],
limit_down=data["LowerLimitPrice"],
open_price=data["OpenPrice"],
high_price=data["HighestPrice"],
low_price=data["LowestPrice"],
pre_close=data["PreClosePrice"],
bid_price_1=data["BidPrice1"],
ask_price_1=data["AskPrice1"],
bid_volume_1=data["BidVolume1"],
ask_volume_1=data["AskVolume1"],
gateway_name=self.gateway_name
)
if data["BidPrice2"]:
tick.bid_price_2 = data["BidPrice2"]
tick.bid_price_3 = data["BidPrice3"]
tick.bid_price_4 = data["BidPrice4"]
tick.bid_price_5 = data["BidPrice5"]
tick.ask_price_2 = data["AskPrice2"]
tick.ask_price_3 = data["AskPrice3"]
tick.ask_price_4 = data["AskPrice4"]
tick.ask_price_5 = data["AskPrice5"]
tick.bid_volume_2 = data["BidVolume2"]
tick.bid_volume_3 = data["BidVolume3"]
tick.bid_volume_4 = data["BidVolume4"]
tick.bid_volume_5 = data["BidVolume5"]
tick.ask_volume_2 = data["AskVolume2"]
tick.ask_volume_3 = data["AskVolume3"]
tick.ask_volume_4 = data["AskVolume4"]
tick.ask_volume_5 = data["AskVolume5"]
self.gateway.on_tick(tick)
def connect(self, address: str, userid: str, password: str, brokerid: int):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
# If not connected, then start connection first.
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcMdApi(str(path) + "\\Md")
self.registerFront(address)
self.init()
self.connect_status = True
# If already connected, then login immediately.
elif not self.login_status:
self.login()
def login(self):
"""
Login onto server.
"""
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid
}
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def subscribe(self, req: SubscribeRequest):
"""
Subscribe to tick data update.
"""
if self.login_status:
self.subscribeMarketData(req.symbol)
self.subscribed.add(req.symbol)
def close(self):
"""
Close the connection.
"""
if self.connect_status:
self.exit()
class MiniTdApi(TdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(MiniTdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.order_ref = 0
self.connect_status = False
self.login_status = False
        self.auth_status = False
self.login_failed = False
self.userid = ""
self.password = ""
self.brokerid = ""
self.auth_code = ""
self.appid = ""
self.product_info = ""
self.frontid = 0
self.sessionid = 0
self.order_data = []
self.trade_data = []
self.positions = {}
self.sysid_orderid_map = {}
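        # order_data / trade_data buffer callbacks that arrive before the
        # contract query finishes (they are replayed in onRspQryInstrument);
        # sysid_orderid_map maps exchange OrderSysID back to local order ids.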
def onFrontConnected(self):
""""""
self.gateway.write_log("交易服务器连接成功")
if self.auth_code:
self.authenticate()
else:
self.login()
def onFrontDisconnected(self, reason: int):
""""""
self.login_status = False
self.gateway.write_log(f"交易服务器连接断开,原因{reason}")
def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error['ErrorID']:
            self.auth_status = True
            self.gateway.write_log("Trading server authentication succeeded")
self.login()
else:
self.gateway.write_error("交易服务器授权验证失败", error)
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error["ErrorID"]:
self.frontid = data["FrontID"]
self.sessionid = data["SessionID"]
self.login_status = True
self.gateway.write_log("交易服务器登录成功")
# Get instrument data directly without confirm settlement
self.reqid += 1
self.reqQryInstrument({}, self.reqid)
else:
self.login_failed = True
self.gateway.write_error("交易服务器登录失败", error)
def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
""""""
order_ref = data["OrderRef"]
orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
direction=DIRECTION_MINI2VT[data["Direction"]],
offset=OFFSET_MINI2VT.get(data["CombOffsetFlag"], Offset.NONE),
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
status=Status.REJECTED,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.gateway.write_error("交易委托失败", error)
def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
""""""
self.gateway.write_error("交易撤单失败", error)
def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
""""""
pass
def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
"""
        Callback of settlement info confirmation.
"""
pass
def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if data:
# Get buffered position object
key = f"{data['InstrumentID'], data['PosiDirection']}"
position = self.positions.get(key, None)
if not position:
position = PositionData(
symbol=data["InstrumentID"],
exchange=symbol_exchange_map[data["InstrumentID"]],
direction=DIRECTION_MINI2VT[data["PosiDirection"]],
gateway_name=self.gateway_name
)
self.positions[key] = position
# For SHFE position data update
if position.exchange == Exchange.SHFE:
if data["YdPosition"] and not data["TodayPosition"]:
position.yd_volume = data["Position"]
# For other exchange position data update
else:
position.yd_volume = data["Position"] - data["TodayPosition"]
# Get contract size (spread contract has no size value)
size = symbol_size_map.get(position.symbol, 0)
# Calculate previous position cost
cost = position.price * position.volume * size
# Update new position volume
position.volume += data["Position"]
position.pnl += data["PositionProfit"]
# Calculate average position price
if position.volume and size:
cost += data["PositionCost"]
position.price = cost / (position.volume * size)
# Get frozen volume
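            # A long position is frozen by pending sell (close) orders, so its
            # frozen volume comes from ShortFrozen; shorts mirror this with
            # LongFrozen.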
if position.direction == Direction.LONG:
position.frozen += data["ShortFrozen"]
else:
position.frozen += data["LongFrozen"]
if last:
for position in self.positions.values():
self.gateway.on_position(position)
self.positions.clear()
def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if "AccountID" not in data:
return
account = AccountData(
accountid=data["AccountID"],
balance=data["Balance"],
frozen=data["FrozenMargin"] + data["FrozenCash"] + data["FrozenCommission"],
gateway_name=self.gateway_name
)
account.available = data["Available"]
self.gateway.on_account(account)
def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback of instrument query.
"""
product = PRODUCT_MINI2VT.get(data.get("ProductClass", None), None)
if product:
contract = ContractData(
symbol=data["InstrumentID"],
exchange=EXCHANGE_MINI2VT[data["ExchangeID"]],
name=data["InstrumentName"],
product=product,
size=data["VolumeMultiple"],
pricetick=data["PriceTick"],
gateway_name=self.gateway_name
)
# For option only
if contract.product == Product.OPTION:
                contract.option_underlying = data["UnderlyingInstrID"]
                contract.option_type = OPTIONTYPE_MINI2VT.get(data["OptionsType"], None)
                contract.option_strike = data["StrikePrice"]
                contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d")
self.gateway.on_contract(contract)
symbol_exchange_map[contract.symbol] = contract.exchange
symbol_name_map[contract.symbol] = contract.name
symbol_size_map[contract.symbol] = contract.size
if last:
self.gateway.write_log("合约信息查询成功")
for data in self.order_data:
self.onRtnOrder(data)
self.order_data.clear()
for data in self.trade_data:
self.onRtnTrade(data)
self.trade_data.clear()
def onRtnOrder(self, data: dict):
"""
Callback of order status update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
self.order_data.append(data)
return
frontid = data["FrontID"]
sessionid = data["SessionID"]
order_ref = data["OrderRef"]
orderid = f"{frontid}_{sessionid}_{order_ref}"
order = OrderData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
type=ORDERTYPE_MINI2VT[data["OrderPriceType"]],
direction=DIRECTION_MINI2VT[data["Direction"]],
offset=OFFSET_MINI2VT[data["CombOffsetFlag"]],
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
traded=data["VolumeTraded"],
status=STATUS_MINI2VT[data["OrderStatus"]],
time=data["InsertTime"],
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.sysid_orderid_map[data["OrderSysID"]] = orderid
def onRtnTrade(self, data: dict):
"""
Callback of trade status update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
self.trade_data.append(data)
return
orderid = self.sysid_orderid_map[data["OrderSysID"]]
trade = TradeData(
symbol=symbol,
exchange=exchange,
orderid=orderid,
tradeid=data["TradeID"],
direction=DIRECTION_MINI2VT[data["Direction"]],
offset=OFFSET_MINI2VT[data["OffsetFlag"]],
price=data["Price"],
volume=data["Volume"],
datetime=data["TradeTime"],
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
def connect(
self,
address: str,
userid: str,
password: str,
brokerid: int,
auth_code: str,
appid: str,
product_info
):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
self.auth_code = auth_code
self.appid = appid
self.product_info = product_info
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcTraderApi(str(path) + "\\Td")
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
self.registerFront(address)
self.init()
self.connect_status = True
else:
self.authenticate()
def authenticate(self):
"""
Authenticate with auth_code and appid.
"""
req = {
"UserID": self.userid,
"BrokerID": self.brokerid,
"AuthCode": self.auth_code,
"AppID": self.appid
}
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqAuthenticate(req, self.reqid)
def login(self):
"""
Login onto server.
"""
if self.login_failed:
return
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid,
"AppID": self.appid
}
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def send_order(self, req: OrderRequest):
"""
Send new order.
"""
self.order_ref += 1
mini_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"LimitPrice": req.price,
"VolumeTotalOriginal": int(req.volume),
"OrderPriceType": ORDERTYPE_VT2MINI.get(req.type, ""),
"Direction": DIRECTION_VT2MINI.get(req.direction, ""),
"CombOffsetFlag": OFFSET_VT2MINI.get(req.offset, ""),
"OrderRef": str(self.order_ref),
"InvestorID": self.userid,
"UserID": self.userid,
"BrokerID": self.brokerid,
"CombHedgeFlag": THOST_FTDC_HF_Speculation,
"ContingentCondition": THOST_FTDC_CC_Immediately,
"ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
"IsAutoSuspend": 0,
"TimeCondition": THOST_FTDC_TC_GFD,
"VolumeCondition": THOST_FTDC_VC_AV,
"MinVolume": 1
}
if req.type == OrderType.FAK:
mini_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
mini_req["TimeCondition"] = THOST_FTDC_TC_IOC
mini_req["VolumeCondition"] = THOST_FTDC_VC_AV
elif req.type == OrderType.FOK:
mini_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
mini_req["TimeCondition"] = THOST_FTDC_TC_IOC
mini_req["VolumeCondition"] = THOST_FTDC_VC_CV
self.reqid += 1
self.reqOrderInsert(mini_req, self.reqid)
orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
order = req.create_order_data(orderid, self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel existing order.
"""
frontid, sessionid, order_ref = req.orderid.split("_")
mini_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"OrderRef": order_ref,
"FrontID": int(frontid),
"SessionID": int(sessionid),
"ActionFlag": THOST_FTDC_AF_Delete,
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqOrderAction(mini_req, self.reqid)
def query_account(self):
"""
Query account balance data.
"""
self.reqid += 1
self.reqQryTradingAccount({}, self.reqid)
def query_position(self):
"""
Query position holding data.
"""
if not symbol_exchange_map:
return
req = {
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqQryInvestorPosition(req, self.reqid)
def close(self):
""""""
if self.connect_status:
self.exit()
| 30.673594
| 99
| 0.565262
|
b5792ffc3a8902397f364e5e784ae92152e9d998
| 5,423
|
py
|
Python
|
src/brain_training.py
|
irlrobot/memory_loss
|
cda0356653998313ffcc9211a2bb211699aff0d7
|
[
"Apache-2.0"
] | null | null | null |
src/brain_training.py
|
irlrobot/memory_loss
|
cda0356653998313ffcc9211a2bb211699aff0d7
|
[
"Apache-2.0"
] | null | null | null |
src/brain_training.py
|
irlrobot/memory_loss
|
cda0356653998313ffcc9211a2bb211699aff0d7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Memory Loss
github.com/irlrobot/memory_loss
"""
QUESTIONS = [
{
"question": "Remember these words, Epic, Bird, Taco, Sphere. "\
"Was the second word, bird?",
"answer": "yes"
},
# {
# "question": "Remember these words, Map, Computer, Fish, Blimp, "\
# "What was the first word?",
# "answer": "Map",
# "category": "repeat"
# },
# {
# "id": 87,
# "question": "Remember these words, French, Boat, Yesterday, Apple, "\
# "What was the second word?",
# "answer": "Boat",
# "category": "repeat"
# },
# {
# "id": 88,
# "question": "Remember these words, November, Star, Dream, Tuesday, "\
# "What was the last word?",
# "answer": "Tuesday",
# "category": "repeat"
# },
# {
# "id": 89,
# "question": "Remember these words, Drum, Animal, Mighty, Baron, "\
# "What was the second word?",
# "answer": "Animal",
# "category": "repeat"
# },
# {
# "id": 90,
# "question": "Remember these words, Saxophone, Analysis, War, Trumpet, "\
# "What was the third word?",
# "answer": "War",
# "category": "repeat"
# },
# {
# "id": 91,
# "question": "Remember these words, Party, Fire, Drink, Water, "\
# "What was the third word?",
# "answer": "Drink",
# "category": "repeat"
# },
# {
# "id": 92,
# "question": "Remember these words, Answer, Phone, Cancel, Shower, "\
# "What was the first word?",
# "answer": "Answer",
# "category": "repeat"
# },
# {
# "id": 93,
# "question": "Remember these words, Think, Observe, Sleep, Pizza, "\
# "What was the last word?",
# "answer": "Pizza",
# "category": "repeat"
# },
# {
# "id": 94,
# "question": "Remember these words, Pressure, Pants, Advance, Game, "\
# "What was the second word?",
# "answer": "Pants",
# "category": "repeat"
# },
# {
# "id": 95,
# "question": "Remember these words, Moment, Win, Heat, Growth, "\
# "What was the third word?",
# "answer": "Heat",
# "category": "repeat"
# },
# {
# "id": 96,
# "question": "Remember these numbers, 7, 2, 11, 4, "\
# "What was the lowest number?",
# "answer": "2",
# "category": "low_high_number"
# },
# {
# "id": 97,
# "question": "Remember these numbers, 3, 1, 9, 8, "\
# "What was the highest number?",
# "answer": "9",
# "category": "low_high_number"
# },
# {
# "id": 98,
# "question": "Remember these numbers, 12, 15, 18, 6, "\
# "What was the highest number?",
# "answer": "18",
# "category": "low_high_number"
# },
# {
# "id": 99,
# "question": "Remember these numbers, 17, 14, 15, 13, "\
# "What was the lowest number?",
# "answer": "13",
# "category": "low_high_number"
# },
# {
# "id": 100,
# "question": "Remember these numbers, 3, 8, 5, 4, "\
# "What was the lowest number?",
# "answer": "3",
# "category": "low_high_number"
# },
# {
# "id": 101,
# "question": "Remember these numbers, 13, 28, 18, 23, "\
# "What was the highest number?",
# "answer": "28",
# "category": "low_high_number"
# },
# {
# "id": 102,
# "question": "Remember these numbers, 6, 9, 5, 2, "\
# "What was the highest number?",
# "answer": "9",
# "category": "low_high_number"
# },
# {
# "id": 103,
# "question": "Remember these numbers, 37, 32, 38, 34, "\
# "What was the lowest number?",
# "answer": "32",
# "category": "low_high_number"
# },
# {
# "id": 104,
# "question": "Remember these numbers, 5, 7, 1, 3, "\
# "What was the highest number?",
# "answer": "7",
# "category": "low_high_number"
# },
# {
# "id": 105,
# "question": "Remember these numbers, 27, 22, 28, 25, "\
# "What was the lowest number?",
# "answer": "22",
# "category": "low_high_number"
# },
# {
# "id": 106,
# "question": "Remember these numbers, 13, 28, 18, 23. "\
# "Was the third number 38?",
# "answer": "no",
# "category": "repeat"
# },
# {
# "id": 107,
# "question": "Remember these numbers, 6, 9, 5, 2. "\
# "Was the second number 9?",
# "answer": "yes",
# "category": "repeat"
# },
# {
# "id": 108,
# "question": "Remember these numbers, 37, 32, 38, 34. "\
# "Was the second number 38?",
# "answer": "no",
# "category": "repeat"
# },
# {
# "id": 109,
# "question": "Remember these numbers, 5, 7, 1, 3. "\
# "Was the third number 1?",
# "answer": "yes",
# "category": "repeat"
# },
{
"question": "Remember these numbers, 27, 22, 28, 25. "\
"Was the first number 26?",
"answer": "no"
}
]
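

# A minimal sketch of how QUESTIONS might be consumed; check_answer is a
# hypothetical helper added for illustration and is not part of the original
# skill code.
def check_answer(entry, reply):
    """Return True when the user's reply matches the expected answer."""
    return reply.strip().lower() == entry["answer"].strip().lower()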
| 29.155914
| 86
| 0.441453
|
d9fcdb31bef7875b85928faa7e9da7b94594c152
| 4,945
|
py
|
Python
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import glob
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from tinctest.lib import local_path, Gpdiff
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.config import GPDBConfig
from mpp.lib.gpdbverify import GpdbVerify
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib.common_utils import *
class GPDBStorageBaseTestCase():
'''
Base Class for Storage test-suits like Crash Recovery,
Pg_Two_Phase, sub_transaction
'''
def __init__(self, config=None):
if config is not None:
self.config = config
else:
self.config = GPDBConfig()
self.filereputil = Filerepe2e_Util()
self.gprecover = GpRecover(self.config)
self.gpstop = GpStop()
self.gpstart = GpStart()
self.gpverify = GpdbVerify(config=self.config)
self.dbstate = DbStateClass('run_validation', self.config)
self.port = os.getenv('PGPORT')
def invoke_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
''' Reset the fault and then issue the fault with the given type'''
self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))
def start_db(self):
'''Gpstart '''
rc = self.gpstart.run_gpstart_cmd()
if not rc:
raise Exception('Failed to start the cluster')
tinctest.logger.info('Started the cluster successfully')
def stop_db(self):
        ''' Gpstop and don't check the return code '''
cmd = Command('Gpstop_a', 'gpstop -a')
tinctest.logger.info('Executing command: gpstop -a')
cmd.run()
def get_trigger_status(self, trigger_count,max_cnt=50):
'''Compare the pg_stat_activity count with the total number of trigger_sqls executed '''
psql_count=0
for i in range(1,trigger_count):
psql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags='-q -t', dbname='postgres')
sleep(1)
tinctest.logger.info('Count of trigger sqls %s And it should be %s' % (psql_count, trigger_count))
if psql_count < trigger_count :
            tinctest.logger.info('trigger sql count is below the expected count in get_trigger_status')
return False
return True
def check_trigger_sql_hang(self, test_dir):
        '''
        @param test_dir : directory prefix used to locate the trigger_sql folder
        @description : Return the status of the trigger sqls: whether they are waiting on the fault.
                       Since gpfaultinjector has no way to check if all the sqls are triggered, we
                       use a count(*) on pg_stat_activity and compare it against the total number
                       of trigger sqls.
        '''
trigger_dir = local_path('%s_tests/trigger_sql/' % (test_dir))
trigger_count = len(glob.glob1(trigger_dir,"*.ans"))
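        # Every trigger sql ships with a matching .ans file, so the .ans count
        # equals the number of sessions expected in pg_stat_activity while the
        # faults hold those sqls open.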
return self.get_trigger_status(trigger_count)
def get_items_list(test_file):
''' Get file contents to a list '''
with open(test_file, 'r') as f:
test_list = [line.strip() for line in f]
return test_list
def validate_sql(filename):
''' Compare the out and ans files '''
out_file = local_path(filename.replace(".sql", ".out"))
ans_file = local_path(filename.replace('.sql' , '.ans'))
assert Gpdiff.are_files_equal(out_file, ans_file)
def run_sql(filename, verify=True):
''' Run the provided sql and validate it '''
out_file = local_path(filename.replace(".sql", ".out"))
PSQL.run_sql_file(sql_file = filename, out_file = out_file)
if verify == True:
validate_sql(filename)
| 40.203252
| 134
| 0.688777
|
a3132ec7423ecd10822e5238d7800b86aaa8625d
| 467
|
py
|
Python
|
add_data.py
|
Mashex/BookFlix
|
bdac6dbc56ba0c90f6ee7a2308f93e54dcb759c7
|
[
"Unlicense"
] | null | null | null |
add_data.py
|
Mashex/BookFlix
|
bdac6dbc56ba0c90f6ee7a2308f93e54dcb759c7
|
[
"Unlicense"
] | null | null | null |
add_data.py
|
Mashex/BookFlix
|
bdac6dbc56ba0c90f6ee7a2308f93e54dcb759c7
|
[
"Unlicense"
] | 1
|
2019-09-30T20:02:11.000Z
|
2019-09-30T20:02:11.000Z
|
import csv
from books.models import Book
with open('scripts/output_with_8000_desc_genre.csv') as f:
reader = csv.reader(f)
print(next(reader))
print ()
for row in reader:
Book.objects.create(goodreads_book_id = row[1],
book_id = row[0],
published_date = row[8],
author = row[7],
title = row[10],
original_title = row[9],
rating = row[12],
description = row[23],
image_url = row[21],
image_location = row[24],
genres = row[25])
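
# Column indices above are positional, matching the header row printed by
# print(next(reader)). For large files, batching the inserts would avoid one
# query per row; a sketch, assuming no per-row signal handling is needed:
#     books = [Book(...) for row in reader]
#     Book.objects.bulk_create(books, batch_size=500)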
| 22.238095
| 58
| 0.659529
|
02f113d4c59f6a32442085555ba2e2c3f1de8db4
| 1,941
|
py
|
Python
|
befunge/state.py
|
malloc47/befunge.py
|
6bf3f5f667200c1d4f4a389c3c1e82780743db2c
|
[
"FSFAP"
] | 1
|
2015-09-23T20:43:44.000Z
|
2015-09-23T20:43:44.000Z
|
befunge/state.py
|
malloc47/befunge.py
|
6bf3f5f667200c1d4f4a389c3c1e82780743db2c
|
[
"FSFAP"
] | null | null | null |
befunge/state.py
|
malloc47/befunge.py
|
6bf3f5f667200c1d4f4a389c3c1e82780743db2c
|
[
"FSFAP"
] | null | null | null |
import sys
from befunge.syntax import Tokens, directions
from befunge.board import BefungeBoard
class State(object):
"""keeps track of all state, along with the board and stack"""
def __init__(self, board=BefungeBoard()):
self.pos = (0, 0)
self.direction = Tokens.MDIR[0]
self.literal = False
self.board = board
self.stack = []
self.user_input = []
self.output_spool = ''
def move(self):
"""handle moving in self.direction and wrapping the board"""
def wrap(pos, size):
return pos if pos < size and pos >= 0 else (pos - size) % size
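            # e.g. wrap(3, 3) == 0 and wrap(-1, 3) == 2: stepping off either
            # edge re-enters from the opposite side, since Python's % yields a
            # non-negative result for a positive modulus.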
# move pointer and then wrap the coordinates if needed
self.pos = tuple(map(wrap,
map(
sum,
zip(
self.pos,
directions[self.direction])),
self.board.size()))
def push(self, n): self.stack.append(n)
def pop(self): return self.stack.pop() if len(self.stack) > 0 else 0
def peek(self): return self.stack[-1] if len(self.stack) > 0 else 0
def read(self): return self.board.get(self.pos)
def inpt(self, one=False):
# try to read from user_input first, so that the interpreter
# can be automated
try:
return self.user_input.pop()
        except IndexError:
return sys.stdin.read(1) if one else input()
def output(self, s, display=True):
"""either spool up the output strings or push them to stdout"""
if not s: return
if display:
sys.stdout.write(str(s))
sys.stdout.flush()
else:
self.output_spool += s
def __repr__(self):
return ('<pos: ' + str(self.pos) + ', direction: '
                + str(self.direction) + ', literal: ' + str(self.literal) + '>')
| 32.898305
| 74
| 0.532715
|
9f1df44d3c4376c8e06d7957bc40e97488784ac0
| 639
|
py
|
Python
|
enthought/util/wx/__init__.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/util/wx/__init__.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
Latest/venv/Lib/site-packages/pyface/wx/__init__.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
| 42.6
| 79
| 0.58216
|
4acfa2844b17899ada6716ce0403c97b76c3b664
| 1,313
|
py
|
Python
|
get_gt_txt.py
|
NJUShane/yolo3-pytorch
|
16b3711a10ba64d41a943bb6690232f9747385fb
|
[
"MIT"
] | 5
|
2021-07-07T17:32:32.000Z
|
2022-01-08T03:53:21.000Z
|
get_gt_txt.py
|
yddcode/yolo-keras
|
230c037d7a9240bb19b29116e8ba2d1f062df128
|
[
"MIT"
] | null | null | null |
get_gt_txt.py
|
yddcode/yolo-keras
|
230c037d7a9240bb19b29116e8ba2d1f062df128
|
[
"MIT"
] | 1
|
2020-10-30T04:07:04.000Z
|
2020-10-30T04:07:04.000Z
|
#----------------------------------------------------#
#                   Get the ground-truth of the test set
#                   A detailed video tutorial (in Chinese) is available at
# https://www.bilibili.com/video/BV1zE411u7Vw
#----------------------------------------------------#
import sys
import os
import glob
import xml.etree.ElementTree as ET
image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()
if not os.path.exists("./input"):
os.makedirs("./input")
if not os.path.exists("./input/ground-truth"):
os.makedirs("./input/ground-truth")
for image_id in image_ids:
with open("./input/ground-truth/"+image_id+".txt", "w") as new_f:
root = ET.parse("VOCdevkit/VOC2007/Annotations/"+image_id+".xml").getroot()
for obj in root.findall('object'):
if obj.find('difficult')!=None:
difficult = obj.find('difficult').text
if int(difficult)==1:
continue
obj_name = obj.find('name').text
bndbox = obj.find('bndbox')
left = bndbox.find('xmin').text
top = bndbox.find('ymin').text
right = bndbox.find('xmax').text
bottom = bndbox.find('ymax').text
new_f.write("%s %s %s %s %s\n" % (obj_name, left, top, right, bottom))
print("Conversion completed!")
| 37.514286
| 85
| 0.529322
|
0fba1edcb797a9383727db3a51e7d4b53c476679
| 3,304
|
py
|
Python
|
tests/chainer_tests/functions_tests/array_tests/test_permutate.py
|
yuhonghong66/chainer
|
15d475f54fc39587abd7264808c5e4b33782df9e
|
[
"MIT"
] | 1
|
2019-02-12T23:10:16.000Z
|
2019-02-12T23:10:16.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_permutate.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 2
|
2018-01-09T23:05:30.000Z
|
2018-01-19T01:19:34.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_permutate.py
|
nolfwin/chainer
|
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
|
[
"MIT"
] | 1
|
2018-05-28T22:43:34.000Z
|
2018-05-28T22:43:34.000Z
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'shape': (3,), 'dtype': 'f', 'axis': 0, 'inv': False},
{'shape': (3,), 'dtype': 'f', 'axis': -1, 'inv': True},
{'shape': (3, 4), 'dtype': 'd', 'axis': 1, 'inv': True},
{'shape': (3, 4, 5), 'dtype': 'f', 'axis': 2, 'inv': False}],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64}]
))
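# product_dict takes the cross product of the two lists above, so every
# shape/axis/inv case runs once per label dtype (4 x 4 = 16 parameter sets).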
class TestPermutate(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.indices = numpy.random.permutation(
self.shape[self.axis]).astype(self.label_dtype)
def check_forward(self, x_data, ind_data):
x = chainer.Variable(x_data)
indices = chainer.Variable(ind_data)
y = functions.permutate(x, indices, axis=self.axis, inv=self.inv)
y_cpu = cuda.to_cpu(y.data)
y_cpu = numpy.rollaxis(y_cpu, axis=self.axis)
x_data = numpy.rollaxis(self.x, axis=self.axis)
for i, ind in enumerate(self.indices):
if self.inv:
numpy.testing.assert_array_equal(y_cpu[ind], x_data[i])
else:
numpy.testing.assert_array_equal(y_cpu[i], x_data[ind])
def test_forward_cpu(self):
self.check_forward(self.x, self.indices)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.indices))
@attr.gpu
def test_forward_mixed(self):
self.check_forward(cuda.to_gpu(self.x), self.indices)
def check_backward(self, x_data, ind_data, g_data):
def fun(x, ind):
return functions.permutate(x, ind, self.axis, self.inv)
gradient_check.check_backward(
fun, (x_data, ind_data), g_data, dtype='d', atol=0.001, rtol=0.001)
def test_backward_cpu(self):
self.check_backward(self.x, self.indices, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.indices),
cuda.to_gpu(self.g))
@testing.parameterize(
{'indices': [0, 0]},
{'indices': [-1, 0]},
{'indices': [0, 2]},
)
class TestPermutateInvalidIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(10).reshape((2, 5)).astype('f')
self.ind = numpy.array(self.indices, 'i')
self.debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.debug)
def check_invalid(self, x_data, ind_data):
x = chainer.Variable(x_data)
ind = chainer.Variable(ind_data)
with self.assertRaises(ValueError):
functions.permutate(x, ind)
    def test_invalid_cpu(self):
self.check_invalid(self.x, self.ind)
@attr.gpu
    def test_invalid_gpu(self):
self.check_invalid(cuda.to_gpu(self.x), cuda.to_gpu(self.ind))
testing.run_module(__name__, __file__)
| 32.07767
| 79
| 0.625303
|
dd9acf0ded61a3539b7a9b0f85310909b76cb63c
| 3,615
|
py
|
Python
|
DQN_PER.py
|
wotmd5731/pytorch_dqn
|
fb3062c3aff1e5e249551807e53e974363f7595c
|
[
"MIT"
] | 11
|
2018-04-22T16:03:17.000Z
|
2021-09-02T09:10:04.000Z
|
DQN_PER.py
|
wotmd5731/pytorch_dqn
|
fb3062c3aff1e5e249551807e53e974363f7595c
|
[
"MIT"
] | null | null | null |
DQN_PER.py
|
wotmd5731/pytorch_dqn
|
fb3062c3aff1e5e249551807e53e974363f7595c
|
[
"MIT"
] | 1
|
2021-04-06T12:12:19.000Z
|
2021-04-06T12:12:19.000Z
|
# -*- coding: utf-8 -*-
import random
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.transforms as T
import argparse
from argument import get_args
args = get_args('DQN_PER')
from env import Env
env = Env(args)
from memory import ReplayMemory ,PER_Memory
#memory = ReplayMemory(args)
memory = PER_Memory(args.memory_capacity)
#args.memory_capacity = 20000
#args.learn_start = 20000
#args.render= True
from agent import Agent
agent = Agent(args)
"""
define test function
"""
from plot import _plot_line
current_time = time.time()
Ts, Trewards, Qs = [], [], []
def test(main_episode):
global current_time
prev_time = current_time
current_time = time.time()
T_rewards, T_Qs = [], []
Ts.append(main_episode)
total_reward = 0
episode = 0
while episode < args.evaluation_episodes:
episode += 1
T=0
reward_sum=0
state = env.reset()
while T < args.max_step:
T += 1
if args.render:
env.render()
action = agent.get_action(state,evaluate=True)
next_state , reward , done, _ = env.step(action)
state = next_state
total_reward += reward
reward_sum += reward
if done:
break
T_rewards.append(reward_sum)
ave_reward = total_reward/args.evaluation_episodes
# Append to results
Trewards.append(T_rewards)
# Qs.append(T_Qs)
# Plot
_plot_line(Ts, Trewards, 'rewards_'+args.name+args.game, path='results')
# _plot_line(Ts, Qs, 'Q', path='results')
# Save model weights
# main_dqn.save('results')
print('episode: ',main_episode,'Evaluation Average Reward:',ave_reward, 'delta time:',current_time-prev_time)
# if ave_reward >= 300:
# break
"""
randomize state push in memory
before main loop start
"""
global_count = 0
episode = 0
while True:
episode += 1
T=0
state = env.reset()
while T < args.max_step:
action = random.randrange(0,args.action_space)
next_state , reward , done, _ = env.step(action)
td = agent.get_td_error(reward,state,action,next_state)
memory.push(td,[state, action, reward, next_state, done])
state = next_state
T += 1
global_count += 1
if done :
break
print("\r push : %d/%d "%(global_count,args.learn_start),end='\r',flush=True)
if global_count > args.learn_start:
break
print('')
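# Prioritized replay needs an initial TD error per transition to seed its
# sampling priorities, so the buffer is pre-filled from a random policy
# before the learning loop below starts.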
"""
main loop
"""
global_count = 0
episode = 0
while episode < args.max_episode_length:
episode += 1
T=0
state = env.reset()
while T < args.max_step:
T += 1
global_count += 1
action = agent.get_action(state)
next_state , reward , done, _ = env.step(action)
if args.reward_clip > 0:
reward = max(min(reward, args.reward_clip), -args.reward_clip) # Clip rewards
td = agent.get_td_error(reward,state,action,next_state)
memory.push(td,[state, action, reward, next_state, done])
state = next_state
if global_count % args.replay_interval == 0 :
agent.PER_learn(memory)
if global_count % args.target_update_interval == 0 :
agent.target_dqn_update()
if done :
break
if episode % args.evaluation_interval == 0 :
test(episode)
| 25.104167
| 113
| 0.606639
|
94248f87358cce1640cd799f2c0d707264159c25
| 4,799
|
py
|
Python
|
python/level2_simple_inference/2_object_detection/YOLOV3_coco_detection_picture/src/object_detect.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | null | null | null |
python/level2_simple_inference/2_object_detection/YOLOV3_coco_detection_picture/src/object_detect.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | null | null | null |
python/level2_simple_inference/2_object_detection/YOLOV3_coco_detection_picture/src/object_detect.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append("../../../../common")
sys.path.append("../")
import os
import numpy as np
import acl
import atlas_utils.utils as utils
from PIL import Image, ImageDraw, ImageFont
from atlas_utils.acl_dvpp import Dvpp
import atlas_utils.constants as const
from atlas_utils.acl_model import Model
from atlas_utils.acl_image import AclImage
from atlas_utils.acl_resource import AclResource
labels = ["person",
"bicycle", "car", "motorbike", "aeroplane",
"bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
"bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
"kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog",
"pizza", "donut", "cake", "chair", "sofa", "potted plant", "bed", "dining table",
"toilet", "TV monitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
"scissors", "teddy bear", "hair drier", "toothbrush"]
MODEL_PATH = "../model/yolov3_yuv.om"
MODEL_WIDTH = 416
MODEL_HEIGHT = 416
def pre_process(image, dvpp):
"""preprocess"""
image_input = image.copy_to_dvpp()
yuv_image = dvpp.jpegd(image_input)
print("decode jpeg end")
resized_image = dvpp.crop_and_paste(yuv_image, image.width, image.height, MODEL_WIDTH, MODEL_HEIGHT)
print("resize yuv end")
return resized_image
def post_process(infer_output, origin_img, image_file):
"""postprocess"""
print("post process")
print(infer_output[1])
box_num = infer_output[1][0, 0]
print("box num ", box_num)
box_info = infer_output[0].flatten()
print ("\n")
print(box_info[0:6 * box_num].reshape(6, box_num))
scalex = origin_img.width / MODEL_WIDTH
scaley = origin_img.height / MODEL_HEIGHT
if scalex > scaley:
scaley = scalex
output_path = os.path.join("../outputs", os.path.basename(image_file))
origin_image = Image.open(image_file)
draw = ImageDraw.Draw(origin_image)
font = ImageFont.load_default()
print("images:{}".format(image_file))
print("======== inference results: =============")
for n in range(int(box_num)):
ids = int(box_info[5 * int(box_num) + n])
label = labels[ids]
score = box_info[4 * int(box_num)+n]
top_left_x = box_info[0 * int(box_num)+n] * scaley
top_left_y = box_info[1 * int(box_num)+n] * scaley
bottom_right_x = box_info[2 * int(box_num) + n] * scaley
bottom_right_y = box_info[3 * int(box_num) + n] * scaley
print(" % s: class % d, box % d % d % d % d, score % f" % (
label, ids, top_left_x, top_left_y,
bottom_right_x, bottom_right_y, score))
draw.line([(top_left_x, top_left_y), (bottom_right_x, top_left_y), (bottom_right_x, bottom_right_y), \
(top_left_x, bottom_right_y), (top_left_x, top_left_y)], fill=(0, 200, 100), width=3)
draw.text((top_left_x, top_left_y), label, font=font, fill=255)
origin_image.save(output_path)
def construct_image_info():
"""construct image info"""
image_info = np.array([MODEL_WIDTH, MODEL_HEIGHT,
MODEL_WIDTH, MODEL_HEIGHT],
dtype = np.float32)
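    # This array feeds the model's second input (see model.execute below);
    # both pairs stay at the model resolution because mapping boxes back to
    # the source image is handled by the scale factors in post_process.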
return image_info
def main():
"""
Program execution with picture directory parameters
"""
if (len(sys.argv) != 2):
print("The App arg is invalid")
exit(1)
acl_resource = AclResource()
acl_resource.init()
model = Model(MODEL_PATH)
dvpp = Dvpp(acl_resource)
    #Run inference on every picture found in the directory passed as the first argument
image_dir = sys.argv[1]
images_list = [os.path.join(image_dir, img)
for img in os.listdir(image_dir)
if os.path.splitext(img)[1] in const.IMG_EXT]
#Create a directory to store the inference results
if not os.path.isdir('../outputs'):
os.mkdir('../outputs')
image_info = construct_image_info()
for image_file in images_list:
#read picture
image = AclImage(image_file)
#preprocess image
resized_image = pre_process(image, dvpp)
print("pre process end")
        #run inference on the picture
        result = model.execute([resized_image, image_info])
        #post-process the results
post_process(result, image, image_file)
if __name__ == '__main__':
main()
| 37.787402
| 110
| 0.62638
|
3d45fc30ab899b62ab8e13a78f05b881621256c2
| 9,329
|
py
|
Python
|
tests/unit/service/test_messaging.py
|
davetobin/ignition
|
eb183dca3fb2041d3f6249467a3265e7eb1d8905
|
[
"Apache-2.0"
] | 1
|
2019-09-02T15:23:08.000Z
|
2019-09-02T15:23:08.000Z
|
tests/unit/service/test_messaging.py
|
davetobin/ignition
|
eb183dca3fb2041d3f6249467a3265e7eb1d8905
|
[
"Apache-2.0"
] | 62
|
2019-09-16T14:51:32.000Z
|
2020-07-08T13:28:50.000Z
|
tests/unit/service/test_messaging.py
|
accanto-systems/ignition
|
87087b81dfa7f8f69525f4dd9c74db715e336eca
|
[
"Apache-2.0"
] | 4
|
2021-08-17T14:38:54.000Z
|
2022-02-09T14:33:57.000Z
|
import unittest
import time
import copy
from unittest.mock import patch, MagicMock, call
from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties
from kafka import KafkaProducer
class TestPostalService(unittest.TestCase):
def setUp(self):
self.mock_delivery_service = MagicMock()
def test_init_without_delivery_service_throws_error(self):
with self.assertRaises(ValueError) as context:
PostalService()
self.assertEqual(str(context.exception), 'delivery_service argument not provided')
def test_post_sends_envelope_to_delivery_service(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
test_envelope = Envelope('test', Message('test message'))
postal_service.post(test_envelope)
self.mock_delivery_service.deliver.assert_called_once_with(test_envelope)
def test_post_throws_error_when_envelope_is_none(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
with self.assertRaises(ValueError) as context:
postal_service.post(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to post a message')
class TestKafkaDeliveryService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms': 5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaDeliveryService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaDeliveryService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver(self, mock_kafka_producer_init):
# need to set this explicitly because we've patched KafkaProducer
mock_kafka_producer_init.DEFAULT_CONFIG = KafkaProducer.DEFAULT_CONFIG
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
test_envelope = Envelope('test_topic', Message('test message'))
delivery_service.deliver(test_envelope)
mock_kafka_producer_init.assert_called_once_with(bootstrap_servers='test:9092', api_version_auto_timeout_ms=5000, client_id='ignition')
self.assertEqual(delivery_service.producer, mock_kafka_producer_init.return_value)
mock_kafka_producer = mock_kafka_producer_init.return_value
mock_kafka_producer.send.assert_called_once_with('test_topic', b'test message')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver_throws_error_when_envelope_is_none(self, mock_kafka_producer_init):
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
with self.assertRaises(ValueError) as context:
delivery_service.deliver(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to deliver a message')
class TestKafkaInboxService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms':5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaInboxService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaInboxService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaInboxThread')
def test_watch_inbox_starts_thread(self, mock_kafka_inbox_thread_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_inbox_thread_init.assert_called_once_with('test:9092', 'test_group', 'test_topic', mock_read_inbox_func, inbox_service._KafkaInboxService__thread_exit_func, self.messaging_properties.config)
mock_kafka_inbox_thread_init.return_value.start.assert_called_once()
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_consumer_init.assert_called_once_with('test_topic', bootstrap_servers='test:9092', group_id='test_group', enable_auto_commit=False)
@patch('ignition.service.messaging.KafkaConsumer')
    def test_watch_inbox_thread_consumes_messages(self, mock_kafka_consumer_init):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
mock_record_2 = MagicMock()
infinite_iter_stop = False
infinite_iter_has_stopped = False
ready_for_second_message = False
second_message_sent = False
def build_iter():
def iter():
yield mock_record_1
while not infinite_iter_stop:
if ready_for_second_message:
yield mock_record_2
break
while not infinite_iter_stop:
time.sleep(0.001)
infinite_iter_has_stopped = True
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
time.sleep(0.01)
try:
self.assertEqual(len(inbox_service.active_threads), 1)
expected_config = copy.copy(self.messaging_properties.config)
expected_config = {
'bootstrap_servers': 'test:9092',
'group_id': 'test_group',
'enable_auto_commit': False,
'client_id': 'ignition'
}
mock_kafka_consumer_init.assert_called_once_with('test_topic', **expected_config)
mock_kafka_consumer.__iter__.assert_called_once()
mock_record_1.value.decode.assert_called_once_with('utf-8')
mock_record_2.value.decode.assert_not_called()
mock_read_inbox_func.assert_called_once_with(mock_record_1.value.decode.return_value)
mock_kafka_consumer.commit.assert_called_once()
ready_for_second_message = True
time.sleep(1)
mock_record_2.value.decode.assert_called_once_with('utf-8')
mock_read_inbox_func.assert_called_with(mock_record_2.value.decode.return_value)
mock_kafka_consumer.commit.assert_has_calls([call(), call()])
finally:
infinite_iter_stop = True
time.sleep(1)
mock_kafka_consumer.close.assert_called_once()
self.assertEqual(len(inbox_service.active_threads), 0)
@patch('ignition.service.messaging._thread')
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_calls_exit_func_on_error(self, mock_kafka_consumer_init, mock_thread):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
infinite_iter_stop = False
ready_for_message = True
def build_iter():
def iter():
while not infinite_iter_stop:
if ready_for_message:
yield mock_record_1
break
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(test_mode=True, messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
mock_read_inbox_func.side_effect = ValueError('Test error')
self.assertFalse(inbox_service.exited)
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
ready_for_message = True
time.sleep(0.03)
## Indicates the exit func on inbox_service was called when in "test_mode"
self.assertTrue(inbox_service.exited)
mock_kafka_consumer.commit.assert_not_called()
| 51.541436
| 209
| 0.731054
|