def solution(A, B):
answer = 0
A.sort()
B.sort(reverse=True)
for i, j in zip(A, B):
answer += i*j
return answer
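# Illustrative usage (not part of the original snippet): sorting A ascending and
# B descending pairs the smallest values of A with the largest values of B, which
# minimises the sum of pairwise products (rearrangement inequality).
if __name__ == "__main__":
    print(solution([1, 3, 2], [4, 5, 1]))  # 1*5 + 2*4 + 3*1 = 16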
|
images = {
# key is image name, value is pygame image
}
def register_images():
from pygame.image import load
from Graphics import RendererManager
import os
# get the directory of the images relative to this file
directory = os.path.join(__file__.split("Graphics")[0], "assets", "images")
files = os.listdir(directory)
for file_name in files:
full_path = os.path.join(directory, file_name)
name_without_extension = file_name.split(".")[0]
extension = file_name.split(".")[1]
if extension == 'png':
images[name_without_extension] = load(full_path).convert_alpha(RendererManager.screen)
else:
images[name_without_extension] = load(full_path).convert(RendererManager.screen)
def get_image(name):
return images[name]
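# Hypothetical usage sketch (assumes pygame's display and RendererManager.screen
# are initialised before loading, and that an asset named "player.png" exists):
#   register_images()
#   player_sprite = get_image("player")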
|
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from .auth.views import account_profile
from .views import member_index, member_action
urlpatterns = [
# Landing page area
url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),
url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),
url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),
url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),
# Account management is done by allauth
url(r'^accounts/', include('allauth.urls')),
# Account profile and member info done locally
url(r'^accounts/profile/$', account_profile, name='account_profile'),
url(r'^member/$', member_index, name='user_home'),
url(r'^member/action$', member_action, name='user_action'),
# Usual Django admin
url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
from .node import Node
class Temp_Node(Node):
def __init__(self,
parent: Node,
blank: str = ' '):
super().__init__(value=None)
self.parent = parent
self.width = parent.width
self.left_pad = parent.left_pad
self.right_pad = parent.right_pad
self.blank: str = blank
parent.children.append(self)
def __str__(self):
return self.blank * (self.left_pad + self.width + self.right_pad)
def display(self):
return self.__str__()
def __repr__(self) -> str:
return f'TempNode(width={self.width})'
|
from django.contrib import admin
from .models import SteamID, SteamInfo, VacInfo
class SteamIDAdmin(admin.ModelAdmin):
list_display = (
'pk',
'steamid',
'pub_date',
'author',
)
list_editable = ('steamid',)
search_fields = ('steamid',)
list_filter = ('pub_date',)
empty_value_display = '-пусто-'
class SteamInfoAdmin(admin.ModelAdmin):
list_display = (
'pk',
'steamid',
'pub_date',
'author',
)
list_editable = ('steamid',)
search_fields = ('steamid',)
list_filter = ('pub_date',)
empty_value_display = '-пусто-'
class VacInfoAdmin(admin.ModelAdmin):
list_display = (
'pk',
'steamid',
'pub_date',
'author',
)
list_editable = ('steamid',)
search_fields = ('steamid',)
list_filter = ('pub_date',)
empty_value_display = '-пусто-'
admin.site.register(SteamID, SteamIDAdmin)
admin.site.register(SteamInfo, SteamInfoAdmin)
admin.site.register(VacInfo, VacInfoAdmin)
|
# -*- coding: utf-8 -*-
import typing
from ...config import Config
from ...constant.characters import CharacterFactory
from .....script import Script, Action, SendLabel, SendFocusWindow
from ...constant.talent_category_association import TC
from ...constant.windows import window_index
from ... import act
send_focus_window_trigger = SendFocusWindow(
name="",
actions=[
act.General.TRIGGER,
]
)
def _has_send_focus_window(actions: typing.List[Action]) -> bool:
"""
Identify whether there is a ``SendFocusWindow`` in actions.
:param actions:
:return:
"""
    for action in actions:
        if isinstance(action, SendFocusWindow):
return True
return False
def _add_send_focus_window_if_not_available(actions: typing.List[Action]) -> bool:
if _has_send_focus_window(actions):
return False
else:
actions.append(send_focus_window_trigger)
return True
def litgoatdk_abcde_team_death_grip(config: 'Config', script: Script):
from .. import hk_g07_skills
# if config.active_character_config.is_char_exists(CharacterFactory.make_char_fatmulti1_litgoatdka_pve_blood_dk()):
hk_g07_skills.hk_g.actions.append(
SendLabel(
name=TC.dk.name,
to=config.lbs_by_tc(TC.dk),
actions=[
act.DK.ALL_SPEC_DEATH_GRIP_KEY_G,
]
)
)
from ..hk_g08_alt_numpad_1_to_12 import hk_alt_numpad_1, hk_alt_numpad_2, hk_alt_numpad_3
if config.active_character_config.is_char_exists(CharacterFactory.make_char_fatmulti2_litgoatdkb_pvp_frost_dk()):
hk_alt_numpad_1.actions.append(
SendLabel(
name="dk2",
to=[CharacterFactory.make_char_fatmulti2_litgoatdkb_pvp_frost_dk().window_label, ],
actions=[
act.Target.TARGET_FOCUS_TARGET,
act.DK.ALL_SPEC_DEATH_GRIP_KEY_G,
]
)
)
if config.active_character_config.is_char_exists(CharacterFactory.make_char_fatmulti3_litgoatdkc_pvp_frost_dk()):
hk_alt_numpad_2.actions.append(
SendLabel(
name="dk3",
to=[CharacterFactory.make_char_fatmulti3_litgoatdkc_pvp_frost_dk().window_label, ],
actions=[
act.Target.TARGET_FOCUS_TARGET,
act.DK.ALL_SPEC_DEATH_GRIP_KEY_G,
]
)
)
if config.active_character_config.is_char_exists(CharacterFactory.make_char_fatmulti4_litgoatdkd_pvp_frost_dk()):
hk_alt_numpad_3.actions.append(
SendLabel(
name="dk4",
to=[CharacterFactory.make_char_fatmulti4_litgoatdkd_pvp_frost_dk().window_label, ],
actions=[
act.Target.TARGET_FOCUS_TARGET,
act.DK.ALL_SPEC_DEATH_GRIP_KEY_G,
]
)
)
def boomkin_round_robin_starfall(config: 'Config', script: Script):
from ..hk_g08_alt_numpad_1_to_12 import hk_alt_numpad_1, hk_alt_numpad_2, hk_alt_numpad_3
from ..hk_g09_ctrl_numpad_1_to_12 import hk_ctrl_numpad_1, hk_ctrl_numpad_2, hk_ctrl_numpad_3
hk_alt_numpad_1.actions.append(
SendLabel(
name="balance druid 1",
to=[window_index[11].label, ],
actions=[
act.Druid.BALANCE_SPEC_STAR_FALL_ALT_F
]
)
)
hk_alt_numpad_2.actions.append(
SendLabel(
name="balance druid 2",
to=[window_index[12].label, ],
actions=[
act.Druid.BALANCE_SPEC_STAR_FALL_ALT_F
]
)
)
hk_alt_numpad_3.actions.append(
SendLabel(
name="balance druid 3",
to=[window_index[13].label, ],
actions=[
act.Druid.BALANCE_SPEC_STAR_FALL_ALT_F
]
)
)
hk_ctrl_numpad_1.actions.append(
SendLabel(
name="balance druid 1",
to=[window_index[11].label, ],
actions=[
act.Druid.BALANCE_SPEC_TYPHOON_KEY_G
]
)
)
hk_ctrl_numpad_2.actions.append(
SendLabel(
name="balance druid 1",
to=[window_index[12].label, ],
actions=[
act.Druid.BALANCE_SPEC_TYPHOON_KEY_G
]
)
)
hk_ctrl_numpad_3.actions.append(
SendLabel(
name="balance druid 1",
to=[window_index[13].label, ],
actions=[
act.Druid.BALANCE_SPEC_TYPHOON_KEY_G
]
)
)
def druid_all_stealth(config: 'Config', script: Script):
from .. import hk_g07_skills
hk_g07_skills.hk_alt_f1.actions = [
SendLabel(
name=TC.druid.name,
to=config.lbs_by_tc(tc=TC.druid),
actions=[
act.Druid.ALL_SPEC_CAT_STEALTH_MACRO,
]
)
]
def lgms_ijkl_shadow_priest_group(config: 'Config', script: Script):
from .. import hk_g07_skills
hk_g07_skills.hk_g.actions.append(
SendLabel(
name=TC.priest.name,
to=config.lbs_by_tc(tc=TC.priest),
actions=[
act.Priest.ALL_SPEC_HOLY_NOVA,
]
)
)
def resto_shaman_earth_shield(config: 'Config', script: Script):
from ..hk_g07_skills import hk_z
hk_z.actions.append(
SendLabel(
name=TC.shaman_resto.name,
to=config.lbs_by_tc(tc=TC.shaman_resto)[:1],
actions=[
act.Target.TARGET_FOCUS,
act.Shaman.RESTO_SPEC_EARTH_SHIELD,
]
)
)
_add_send_focus_window_if_not_available(hk_z.actions)
|
"""Distributed modeling."""
from typing import Dict, Tuple, Union
import functools
import jax.numpy as jnp
from jax import vmap
import jax.lax as lax
import jax.ops
# import numpy as np
import oryx
import distributed_cox.generic.taylor as taylor
sow = oryx.core.sow
reap = oryx.core.reap
plant = oryx.core.plant
nest = oryx.core.nest
# Reduction primitives
# ------------------------------------------------------------
def cumsum(vals, *, name: str):
"""Custom cumsum for modeling."""
vals = sow(vals, tag="pre_cumsum", name=name, mode="clobber")
return sow(
jnp.cumsum(vals, axis=0, dtype=None),
tag="cumsum",
name=name,
mode="clobber",
)
def sum(vals, *, name: str): # pylint: disable=redefined-builtin
"""Custom sum for modeling."""
vals = sow(vals, tag="pre_sum", name=name, mode="clobber")
return sow(jnp.sum(vals, axis=0, dtype=None),
tag="sum",
name=name,
mode="clobber")
# end primitives
# ------------------------------------------------------------
def distribute(fun, reduction_kind: str = "sum"):
"""Partitions a function into distributed version.
Assuming `fun` contains invocations of the collective primitives, this
  function partitions `fun` into a composition of two functions,
  `fun_part1` and `fun_part2`.
The arguments to `fun_part1` are the same as `fun`; the outputs of `fun_part1`
are the intermediate values right before the collective primitives in `fun`.
  `fun_part2` takes two additional arguments compared to `fun`:
  `intermediates` and `group_labels`.
`intermediates` has an additional group dimension compared to the
`intermediates` output in `fun_part1`.
`group_labels` is a global array containing individual group labels.
Calling `fun_part1` on multiple sub-divisions of the original inputs,
  followed by `fun_part2`, which collects all the results together, will return
the same result as simply calling `fun`.
"""
pt1_fun = reap(fun, tag="pre_" + reduction_kind)
def pt2_fun(intermediates, group_labels, *args, **kwargs):
intermediates = dict(intermediates)
for name in intermediates:
intermediate = intermediates[name]
K, *_ = intermediate.shape
if reduction_kind == "cumsum":
def groupped_cumsum(intermediate, carry, group_label):
group_cnts, curr_sum = carry
group_cnt_before = group_cnts[group_label]
val = intermediate[group_label, group_cnt_before]
curr_sum = curr_sum + val
group_cnts = jax.ops.index_add(group_cnts, group_label, 1)
return (group_cnts, curr_sum), curr_sum
_, intermediate_reduced = lax.scan(
functools.partial(groupped_cumsum, intermediate),
init=(
jnp.zeros(K, dtype=jnp.int32),
jnp.zeros(intermediate.shape[2:], dtype=intermediate.dtype),
),
xs=group_labels,
)
elif reduction_kind == "sum":
intermediate_reduced = jnp.sum(intermediate, axis=(0, 1))
else:
raise TypeError("Invalid reduction kind")
intermediates[name] = intermediate_reduced
return plant(fun, tag=reduction_kind)(intermediates, *args, **kwargs)
return pt1_fun, pt2_fun
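def _split_sum_demo():
  """Illustrative check (not part of the library) of the invariant stated in the
  :py:func:`distribute` docstring: per-site partial reductions ("part 1"),
  reduced once more across sites ("part 2"), equal the undistributed result."""
  x = jnp.arange(12.0)
  shards = jnp.split(x, 3)  # three "sites"
  partials = jnp.stack([shard.sum() for shard in shards])  # part-1 intermediates
  assert jnp.allclose(partials.sum(), x.sum())  # part-2 reduction matches fun(x)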
def taylor_distribute(fun,
*,
reduction_kind: str,
orders: Dict[str, int],
argnums: Union[int, Tuple[int]] = 0):
"""Taylor distributes function.
First performs taylor expansion on ``fun``. Then, the function is broken into
two parts based on a reduction. The reduction is defined by, for example,
invoking :py:func:`cumsum` in ``fun``. The first part of the function is
mapped by an additional batch axis using :py:func:`jax.vmap`, which allows
  simultaneous computation of the first part of the function across distributed
sites. Then, the second part of the function reduces the result from those
distributed sites, and returns the aggregated output.
Args:
    fun: the function to be Taylor expanded and then distributed.
reduction_kind: the kind of the reduction. This assumes that the
``reduction_kind`` is used in ``fun``.
orders: a mapping from string names of the :py:func:`taylor_expand` invoked
in ``fun`` to their taylor expansion orders.
argnums: the arguments with which the taylor expansion should be performed.
Returns:
    the Taylor expanded and distributed version of ``fun``.
"""
if isinstance(argnums, int):
argnums = (argnums,)
@functools.wraps(fun)
def wrapped(*args, **kwargs):
approx_fn = functools.partial(fun, **kwargs)
for name, order in orders.items():
approx_fn = taylor.taylor_approx_expand(approx_fn,
argnums=argnums,
name=name,
order=order)
if reduction_kind is not None:
pt1_fn, pt2_fn = distribute(approx_fn, reduction_kind=reduction_kind)
else:
pt1_fn, pt2_fn = approx_fn, None
n_single_args = len(args) // 2
single_args = args[:n_single_args]
group_labels = args[n_single_args]
dist_args = args[n_single_args + 1:]
in_axes = tuple(None if i in argnums else 0
for i in range(len(single_args) + len(argnums)))
pt1_fn = vmap(pt1_fn, in_axes=in_axes)
diff_args = [arg for i, arg in enumerate(dist_args) if i in argnums]
pt1_args = [
single_args[i] if i in argnums else arg
for i, arg in enumerate(dist_args)
] + diff_args
intermediates = pt1_fn(*pt1_args)
if reduction_kind is None:
return intermediates
return pt2_fn(intermediates, group_labels, *single_args,
*[arg for i, arg in enumerate(single_args) if i in argnums])
return wrapped
|
from dotenv import load_dotenv
from os import getenv
load_dotenv()
DATABASE_NAME = getenv('DB_DATABASE')
HOST = getenv('DB_HOST')
USER = getenv('DB_USERNAME')
PASSWORD = getenv('DB_PASSWORD')
PORT = int(getenv('DB_PORT'))
SECRET = getenv('SECRET')
import mysql.connector
mydb = mysql.connector.connect(
host=HOST,
user=USER,
password=PASSWORD,
database=DATABASE_NAME
)
mycursor = mydb.cursor()
mycursor.execute('DELETE FROM userfeed')
mycursor.execute('SELECT id FROM feed')
feed = mycursor.fetchall()
mycursor.execute('SELECT id FROM accounts')
accounts = mycursor.fetchall()
for u in accounts:
for f in feed:
        mycursor.execute('INSERT INTO userfeed (user_id, feed_id) VALUES (%s, %s)', (u[0], f[0]))
mydb.commit()
print('done :)')
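# A possible bulk-insert variant (sketch only, not executed above): build all
# (user_id, feed_id) pairs first, insert them with a single executemany() call,
# and commit once at the end instead of once per row.
#   pairs = [(u[0], f[0]) for u in accounts for f in feed]
#   mycursor.executemany('INSERT INTO userfeed (user_id, feed_id) VALUES (%s, %s)', pairs)
#   mydb.commit()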
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Mark Koennecke <mark.koennecke@psi.ch>
# Michele Brambilla <michele.brambilla@psi.ch>
#
# *****************************************************************************
from nicos import session
from nicos.core import multiStatus, status
from nicos.core.device import Attach, Moveable, Override
from nicos.devices.abstract import Motor
class InterfaceLogicalMotorHandler(Moveable):
"""
This is the interface for a generic logical motor handler.
    Subclasses have to implement doRead() and _get_move_list().
doRead() is different in that it returns a dictionary of
motortype: position entries.
Another thing a subclass has to define is self._status_devs which is
a list of those devices whose status needs to be queried in doStatus().
This may be different from the list of attached devices when logical
motors operate on conditional components.
"""
parameter_overrides = {
'fmtstr': Override(volatile=True),
'unit': Override(mandatory=False, default='degree'),
}
status_to_msg = {
status.ERROR: 'Error in %s',
status.BUSY: 'Moving: %s ...',
status.WARN: 'Warning in %s',
status.NOTREACHED: '%s did not reach target!',
status.UNKNOWN: 'Unknown status in %s!',
status.OK: 'Ready.'
}
def doPreinit(self, mode):
self._logical_motors = {}
self._motortypes = []
self.valuetype = {}
def register(self, motortype, motor):
self._motortypes.append(motortype)
self._logical_motors[motortype] = motor
def doReadFmtstr(self):
return ', '.join([mt + '=%(' + mt + ').3f' for mt in self._motortypes])
def _get_dev(self, dev):
return getattr(self, '_attached_%s' % dev, None)
def _read_dev(self, dev):
dev = self._get_dev(dev)
return dev.read(0) if dev else 0.0
def _is_active(self, component):
return component in session.loaded_setups
def _getWaiters(self):
devs = {dev: self._get_dev(dev) for dev in self._status_devs
if self._get_dev(dev)}
return devs
def doStatus(self, maxage=0):
# Check for error and warning in the dependent devices
devs = self._getWaiters()
st_devs = multiStatus(devs, 0)
devs = [n for n, d in devs.items() if d.status()[0] == st_devs[0]]
if st_devs[0] in self.status_to_msg:
msg = self.status_to_msg[st_devs[0]]
if '%' in msg:
msg = msg % ', '.join(devs)
return st_devs[0], msg
return st_devs
def doIsCompleted(self):
# No attached devices, so have to manually check the doIsCompleted
for dev in self._status_devs:
dev = self._get_dev(dev)
if dev and not dev.isCompleted():
return False
return True
def doIsAllowed(self, targets):
# Calculate the possible motor positions using these targets
motor_targets = self._get_move_list(self._get_targets(targets))
# Check if these positions are allowed and populate the
# faults list with the motors that cannot be moved
faults = []
for motor, target in motor_targets:
allowed, _ = motor.isAllowed(target)
if not allowed:
faults.append(motor.name)
                self.log.error('%s cannot be moved to %s; limits are %s', motor,
motor.format(target, motor.unit),
motor.abslimits)
# Return false if some motors cannot reach their new target
if faults:
return False, '%s not movable!' % ', '.join(faults)
# Return True if everything ok
return True, ''
def doStart(self, targets):
for motor, target in self._get_move_list(self._get_targets(targets)):
self.log.debug('New target for %s: %s', motor,
motor.format(target, motor.unit))
motor.move(target)
def _get_targets(self, targets):
targets_dict = {}
current = self.read(0)
for mt in self._motortypes:
target = targets.get(mt)
if target is None:
# If the target is not valid or not specified, fetch the
# target from motor itself
motor = self._logical_motors.get(mt)
if not motor:
self.log.debug('Missing the logical motor %s! '
'Using target = %s (current position) ',
mt, current[mt])
targets_dict[mt] = current[mt]
elif motor.target is not None:
targets_dict[mt] = round(motor.target or current[mt], 3)
else:
targets_dict[mt] = current[mt]
else:
targets_dict[mt] = round(target, 3)
# Return the dictionary of motortype mapped to their targets
return targets_dict
def _get_move_list(self, targets):
# This is the method to override in order to make something happen
return []
class LogicalMotor(Motor):
"""
Class to represent a general logical motor. The motor type is
always the name of the logical device
"""
parameter_overrides = {
'unit': Override(mandatory=False, default='degree'),
'target': Override(volatile=True),
'abslimits': Override(mandatory=False, default=(-3.0, 3.0)),
'userlimits': Override(mandatory=False, default=(-3.0, 3.0))
}
attached_devices = {
'controller': Attach('Controller for the logical motors',
InterfaceLogicalMotorHandler)
}
def doInit(self, mode):
self._attached_controller.register(self.name, self)
def doRead(self, maxage=0):
return self._attached_controller.doRead(maxage)[self.name]
def doReadTarget(self):
return self._getFromCache('target', self.doRead)
def doStatus(self, maxage=0):
# Check for error and warning in the dependent devices
return self._attached_controller.doStatus(maxage)
def doIsAllowed(self, pos):
return self._attached_controller.doIsAllowed({self.name: pos})
def doIsCompleted(self):
return self._attached_controller.doIsCompleted()
def doStart(self, pos):
self._attached_controller.doStart({self.name: pos})
def doStop(self):
if self.status(0)[0] == status.BUSY:
self._attached_controller.stop()
# Reset the target for this motor
self._setROParam('target', self.doRead(0))
|
from django import template
register = template.Library()
@register.filter
def needs_subtable(value):
"""
Returns True if `value` is a list.
This is used to render service_result data items in a subtable.
"""
return isinstance(value, list)
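# Hypothetical template usage (names are illustrative):
#   {% if item.value|needs_subtable %}
#       ... render item.value as a nested table ...
#   {% endif %}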
|
import sys
import subprocess
from string import ascii_letters, digits
from random import choice
all_chs = ascii_letters + digits
def gen_pass(n=8):
    # List comprehension: pick n random characters and collect them in a list
str_list = [choice(all_chs) for i in range(n)]
    # Join the characters with an empty string
return ''.join(str_list)
def add_user(username, password, fname):
info = """用户信息:
用户名:%s
密码:%s
""" % (username, password)
    # First check whether the user already exists
    result = subprocess.run('id %s > /dev/null 2>&1' % username, shell=True)
    if result.returncode == 0:
        print('%s already exists' % username)
        return  # a bare return yields None; the function ends here
    # Create the user and set the password
subprocess.run('useradd %s' % username, shell=True)
subprocess.run(
'echo %s | passwd --stdin %s' % (password, username),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
    # Write the user info to the file
with open(fname, 'a') as fobj:
fobj.write(info)
if __name__ == '__main__':
username = sys.argv[1]
password = gen_pass()
fname = '/tmp/users.txt'
rc = add_user(username, password, fname)
print(rc)
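# Example invocation (assumes this script is saved as adduser.py and run as root,
# since useradd/passwd need privileges):
#   python3 adduser.py alice
# A random 8-character password is generated and the details are appended to
# /tmp/users.txt.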
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.db import models
class OptHistory(models.Model):
operator = models.CharField(u'操作用户', max_length=128)
log = models.CharField(u'明细信息', max_length=1000, null=True)
ip_list = models.GenericIPAddressField(u'IP列表')
bk_biz_id = models.CharField(u'业务ID', max_length=16)
bk_biz_name = models.CharField(u'业务名', max_length=16)
job_status = models.IntegerField(u'任务状态', null=True)
opt_at = models.DateTimeField(u'操作时间', auto_now_add=True)
job_id = models.IntegerField(u'任务id', null=True)
def __unicode__(self):
return '{}.{}.{}'.format(self.ip_list,
self.job_id,
self.opt_at)
class Meta:
verbose_name = '操作记录信息'
verbose_name_plural = '操作记录信息'
def toDic(self):
return dict([(attr, getattr(self, attr)) for attr in [f.name for f in self._meta.fields]])
class TaskInfo(models.Model):
task_type = models.CharField(u'任务类型', max_length=256)
script_param = models.CharField(u'脚本参数', max_length=1000, default='')
script_content = models.CharField(u'脚本内容', max_length=10000, null=True)
def __unicode__(self):
return '{}.{}.{}'.format(self.task_type,
self.script_param,
self.script_content)
class Meta:
verbose_name = '任务信息'
verbose_name_plural = '任务信息'
def toDic(self):
return dict([(attr, getattr(self, attr)) for attr in [f.name for f in self._meta.fields]])
|
import argparse
import numpy as np
import json
import random
import keras
import tensorflow as tf
import sklearn.metrics
from scipy.stats import norm as dist_model
from keras import backend as K
def l2ac_predict(model, data, top_n, vote_n=1):
test_X, test_X1=data['test_X0'], data['test_X1']
y_pred=[]
for ix in range(test_X1.shape[1]): #through all candidate classes
if vote_n>1:
n_pred=[]
for jx in range(-vote_n, 0):
n_pred.append(model.predict([test_X, test_X1[:,ix,jx].reshape(-1,1) ] ) )
n_pred=np.concatenate(n_pred, 1)
else:
n_pred=model.predict([test_X, test_X1[:,ix,-top_n:] ] )
y_pred.append( np.expand_dims(n_pred, 1) )
y_pred=np.concatenate(y_pred, 1)
y_pred=y_pred[:,:,-vote_n:].sum(-1)/float(vote_n)
return y_pred
def doc_predict(model, data, model_type):
if "mlp" in model_type:
y_pred=model.predict(data['test_X0'])
else:
y_pred=model.predict(data['test_idx_X0'])
return y_pred
def doc_thres(model, data, model_type, scale = 2.):
train_y = data['train_set_Y']
if "mlp" in model_type:
train_pred = model.predict(data['train_set_X'])
else:
train_pred = model.predict(data['train_set_idx_X'])
#print train_y.shape, train_pred.shape
def fit(prob_pos_X):
prob_pos = [p for p in prob_pos_X]+[2-p for p in prob_pos_X]
pos_mu, pos_std = dist_model.fit(prob_pos)
return pos_std
stds = []
for c in range(train_pred.shape[-1]):
        idx = train_y == c
c_pred = train_pred[idx]
c_prob = c_pred[:,c]
std = fit(c_prob)
stds.append(std)
thres = [max(0.5, 1. - scale * std) for std in stds]
return thres
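def _doc_threshold_demo(scale=2.):
    # Illustrative only (synthetic values, not part of the evaluation pipeline):
    # per-class probabilities are mirrored around 1 exactly as in fit() above, so
    # the fitted Gaussian is centred at 1 and the rejection threshold sits
    # scale * std below it, floored at 0.5.
    probs = [0.95, 0.9, 0.85, 0.99]
    mirrored = probs + [2 - p for p in probs]
    _, std = dist_model.fit(mirrored)
    return max(0.5, 1. - scale * std)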
def evaluate(y_true, y_pred, thres=0.5, rejection=False, mode="weighted"):
if rejection:
if isinstance(thres, list):
reject_pred = []
for p in y_pred:# loop every test prediction
max_class = np.argmax(p)# predicted class
max_value = np.max(p)# predicted probability
if max_value > thres[max_class]:
reject_pred.append(0)#predicted probability is greater than threshold, accept
else:
reject_pred.append(1)#otherwise, reject
y_pred=np.concatenate([y_pred, np.expand_dims(reject_pred, 1) ], 1)
else:
y_pred=np.concatenate([y_pred, np.expand_dims(y_pred.max(axis=1)<=thres, 1) ], 1)
else:
keep_idx=(y_true!=y_true.max() )
y_pred=y_pred[keep_idx]
y_true=y_true[keep_idx]
y_pred=y_pred.argmax(axis=1)
return sklearn.metrics.f1_score(y_true, y_pred, average=mode), y_true, y_pred
def pred_evaluate(config):
set_modes=config["set_modes"] #["test_25", "test_50", "test_75"]
db=config["db"] #"amazon"
out_dir=config["out_dir"]
model_type=config["model_type"]
doc_eval="DOC" in model_type
if not doc_eval:
top_n=config["top_n"] #10
vote_n=config["vote_n"] #1 #typically 1, we disable manual vote; when top_n=1, we optionally vote
scores={}
for set_mode in set_modes:
data=np.load("../"+db+"/data/"+set_mode+"_idx.npz")
sess=tf.Session()
K.set_session(sess)
if doc_eval:
model_fn=out_dir+"eval_"+set_mode+".h5"
model=keras.models.load_model(model_fn)
else:
model_fn=out_dir+"eval.h5"
model=keras.models.load_model(model_fn)
model.get_layer("embedding_1").set_weights([np.vstack([data['train_rep'], np.zeros((90000, 512))]) ])
thres=0.5
if doc_eval:
y_pred=doc_predict(model, data, model_type)
gaus_thres=doc_thres(model, data, model_type)
else:
y_pred=l2ac_predict(model, data, top_n, vote_n)
weighted_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="weighted")
macro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="macro")
micro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=thres, rejection=True, mode="micro")
scores[set_mode]={'weighted_f1': weighted_f1, 'macro_f1': macro_f1, 'micro_f1': micro_f1}
if doc_eval:
weighted_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="weighted")
macro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="macro")
micro_f1, _, _=evaluate(data['test_Y'], y_pred, thres=gaus_thres, rejection=True, mode="micro")
scores[set_mode+"_gaus"]={'weighted_f1': weighted_f1, 'macro_f1': macro_f1, 'micro_f1': micro_f1}
K.clear_session()
with open(out_dir+"eval.json", "w") as fw:
json.dump(scores, fw)
parser = argparse.ArgumentParser(description="Evaluation",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('config', type=str)
if __name__ == '__main__':
args = parser.parse_args()
with open(args.config) as f:
config=json.load(f)
pred_evaluate(config)
|
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import abc
import datetime as datetime_module
from typing import Iterable, Mapping, Union, Type, Optional, Any
import itertools
import functools
import filelock
import re
import collections.abc
import contextlib
from ..jamming import BaseJamDatabase, Jam, JamId, JamItem, JamKind, JamParchment, JamFileDatabase
from . import utils
from .exceptions import EmptyJam
from ..file_locking import FileLock
class SwankDatabase:
def __init__(self, jam_database: BaseJamDatabase) -> None:
self.jam_database = jam_database
@classmethod
def create_ethereal(cls) -> SwankDatabase:
return cls(JamFileDatabase.create_ethereal())
def get_jam_kind(self, arg: Union[Swank, Type[Swank], str]) -> JamKind:
fixed_arg = type(arg) if isinstance(arg, Swank) else arg
return self.jam_database[fixed_arg]
def get_jam_item(self, swank: Swank) -> JamKind:
assert swank.has_jam_id_and_index
return self.get_jam_kind(swank)[swank.jam_id][swank.jam_index]
def get_swank_type(self, arg: Union[Swank, Type[Swank], str]) -> JamKind:
if isinstance(arg, str):
jam_kind_name = arg
return utils.name_to_type(jam_kind_name)
else:
if isinstance(arg, Swank):
return type(arg)
elif issubclass(arg, Swank):
return arg
def load_swank(self, swank_type_or_name: Union[Type[Swank], str], jam_id: Union[JamId, str],
jam_index: int) -> Swank:
jam_kind = self.get_jam_kind(swank_type_or_name)
swank_type = self.get_swank_type(swank_type_or_name)
jam_item = jam_kind[jam_id][jam_index]
jam = jam_item.read_jam()
if jam is None:
raise EmptyJam
return swank_type._Swank__from_jam(jam, jam_id=jam_id, jam_index=jam_index,
swank_database=self)
def save_swank(self, swank: Swank) -> None:
if not swank.has_jam_id_and_index:
swank.jam_id = JamId.create(block_size=swank.default_block_size)
swank.jam_index = 0
jam_item = self.get_jam_item(swank)
jam = swank._Swank__to_jam()
jam_item.write_jam(jam)
def iterate_latest(self, swank_type: Type[Swank]) -> Iterable[Swank]:
return swank_type.iterate_latest(self)
def _reduce(self) -> tuple:
return (type(self), self.jam_database)
def __eq__(self, other: Any) -> bool:
return (type(other) == type(self)) and (self._reduce() == other._reduce())
def __hash__(self) -> int:
return hash(self._reduce())
def __repr__(self) -> str:
return f'{type(self).__name__}({self.jam_database})'
class SwankType(abc.ABCMeta):
def __new__(mcls, name, bases, dict_) -> Type[Swank]:
cls = abc.ABCMeta.__new__(mcls, name, bases, dict_)
if len(cls.mro()) >= 3: # i.e. it's not the base `Swank` type, which doesn't have fields
cls._Swank__fields = mcls.__get_fields(cls)
return cls
def __get_fields(cls) -> dict:
fields = {}
existing_names = set()
for type_ in cls.mro():
for name, value in vars(type_).items():
if name in existing_names:
# This attribute was already overridden in a subclass, if it's a field we don't
# want to include it.
continue
existing_names.add(name)
if name.startswith('__'):
continue
if isinstance(value, BaseField):
assert name not in fields
fields[name] = value
return fields
def iterate_latest(cls, swank_database: SwankDatabase) -> Iterable[Swank]:
jam_parchments_by_latest = sorted(
swank_database.get_jam_kind(cls),
            key=lambda jam_parchment: jam_parchment._get_path().stat().st_mtime,
reverse=True,
)
for jam_parchment in jam_parchments_by_latest:
for i in reversed(range(len(jam_parchment))):
yield swank_database.load_swank(cls, jam_parchment.jam_id, i)
def get_last(cls, swank_database: SwankDatabase) -> Swank:
try:
return next(cls.iterate_latest(swank_database))
except StopIteration:
raise IndexError
def get_by_name(cls, swank_database: SwankDatabase, name_substring: str) -> Swank:
for swank in cls.iterate_latest(swank_database):
if name_substring in str(swank.jam_id):
return swank
raise LookupError
class Swank(metaclass=SwankType):
default_block_size: int = 1_000
jam_id: Optional[JamId] = None
jam_index: Optional[int] = None
def __init__(self, *, swank_database: SwankDatabase, jam_id: Optional[Union[JamId, str]] = None,
jam_index: Optional[int] = None, **kwargs):
assert (jam_id, jam_index).count(None) in {0, 2}
self.__field_values = {}
self.jam_id = None if jam_id is None else JamId.cast(jam_id)
self.jam_index = jam_index
self.swank_database = swank_database
assert set(kwargs) <= set(self._Swank__fields)
for field_name, field in self.__fields.items():
try:
value = kwargs[field_name]
except KeyError:
value = field.get_default_value(swank_database)
setattr(self, field_name, value)
@property
def has_jam_id_and_index(self):
jam_id_exists = (self.jam_id is not None)
jam_index_exists = (self.jam_index is not None)
assert jam_id_exists == jam_index_exists
return jam_id_exists
@classmethod
def __from_jam(cls, jam: Jam, *, jam_id: JamId, jam_index: int,
swank_database: SwankDatabase) -> Swank:
fields = cls._Swank__fields
swank = cls.__new__(cls)
swank_cache = {(jam_id, jam_index): swank}
### Parsing jam into fields: ###############################################################
# #
kwargs = {}
for full_field_name, value in jam.items():
field_name, field_type_name = full_field_name.split('.')
field = fields[field_name]
assert field_type_name == field.field_type_name
kwargs[field_name] = field.from_jam(value, swank_database=swank_database,
swank_cache=swank_cache)
# #
### Finished parsing jam into fields. ######################################################
cls.__init__(swank, **kwargs,
jam_id=jam_id, jam_index=jam_index, swank_database=swank_database)
return swank
def __to_jam(self) -> Jam:
fields = self._Swank__fields
return {
f'{name}.{field_type.field_type_name}':
field_type.to_jam(getattr(self, name, None), self.swank_database)
for name, field_type in fields.items()
}
@classmethod
def load(cls, swank_database: SwankDatabase, jam_id: Union[JamId, str],
jam_index: int) -> Swank:
return swank_database.load_swank(cls, jam_id=jam_id, jam_index=jam_index)
def reload(self) -> Swank:
return type(self).load(self.swank_database, self.jam_id, self.jam_index)
def save(self, *, all_parchment_fields: bool = True) -> Swank:
from .fields import ParchmentField
self.swank_database.save_swank(self)
if all_parchment_fields:
for field_name, field in self._Swank__fields.items():
if isinstance(field, ParchmentField):
getattr(self, field_name).save()
def _reduce(self) -> tuple:
return (type(self), self.swank_database, self.jam_id, self.jam_index)
def __eq__(self, other: Any) -> bool:
return (
(type(other) == type(self)) and
self.has_jam_id_and_index and
(self._reduce() == other._reduce())
)
def __hash__(self) -> int:
return hash(self._reduce())
def __repr__(self) -> str:
text = ', '.join(
f'{name}={value}' for name, value in self.__field_values.items()
)
return f'<{type(self).__name__}: {text}>'
def to_savvy_content(self) -> tuple:
return (str(self.jam_id), self.jam_index)
@property
@functools.cache
def parchment_lock(self):
jam_item = self.swank_database.get_jam_item(self)
lock_path = jam_item.jam_parchment._get_lock_path()
return FileLock(lock_path)
class SwankRef:
def __init__(self, swank_database: SwankDatabase, swank_type: Type[Swank], jam_id: JamId,
jam_index: int) -> None:
self.swank_database = swank_database
self.swank_type = swank_type
self.swank_type_name = utils.type_to_name(swank_type)
self.jam_kind_name = utils.type_to_name(swank_type)
assert isinstance(jam_id, JamId)
self.jam_id = jam_id
assert isinstance(jam_index, int)
self.jam_index = jam_index
@property
def jam_item(self) -> JamItem:
return self.swank_database.jam_database[self.jam_kind_name][self.jam_id][self.jam_index]
def get(self) -> Swank:
return self.swank_database.load_swank(self.jam_kind_name, self.jam_id, self.jam_index)
@staticmethod
def from_swank_or_ref(swank_or_ref: Optional[Union[Swank, SwankRef]]) -> Optional[SwankRef]:
if swank_or_ref is None:
return None
elif isinstance(swank_or_ref, Swank):
swank = swank_or_ref
assert swank.has_jam_id_and_index
return SwankRef(swank.swank_database, type(swank), swank.jam_id, swank.jam_index)
else:
assert isinstance(swank_or_ref, SwankRef)
return swank_or_ref
def _reduce(self) -> tuple:
return (type(self), self.swank_database, self.swank_type, self.jam_id, self.jam_index)
@property
@functools.cache
def parchment_lock(self):
jam_kind = self.swank_database.get_jam_kind(self.swank_type)
jam_item = jam_kind[self.jam_id][self.jam_index]
lock_path = jam_item.jam_parchment._get_lock_path()
return FileLock(lock_path)
@contextlib.contextmanager
def lock_and_load(self, *, save: bool = False) -> Swank:
with self.parchment_lock:
yield (swank := self.get())
if save:
swank.save()
@contextlib.contextmanager
def lock_and_load_or_create(self, *, save: bool = False) -> Swank:
with self.parchment_lock:
try:
swank = self.get()
except EmptyJam:
swank = self.swank_type(swank_database=self.swank_database,
jam_id=self.jam_id,
jam_index=self.jam_index)
yield swank
if save:
swank.save()
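    # Hypothetical usage sketch (SomeSwank and the jam coordinates below are
    # illustrative, not part of this module):
    #   ref = SwankRef(swank_database, SomeSwank, jam_id, jam_index=0)
    #   with ref.lock_and_load_or_create(save=True) as swank:
    #       ...  # mutate the swank's fields while holding the parchment lock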
def __repr__(self) -> str:
return f'<{type(self).__name__}: {self.swank_type_name}>'
def __eq__(self, other: Any) -> bool:
return (type(other) == type(self)) and (self._reduce() == other._reduce())
def __hash__(self) -> int:
return hash(self._reduce())
from .fields import BaseField
|
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future.builtins import object
import re
from neptune.internal.common.api.exceptions import (
InvalidApiVersionException,
NeptuneConnectionFailedException,
NeptuneServerRequestFailedException,
NeptuneServerResponseErrorException
)
from neptune.internal.common.api.utils import APIErrorCodes
from neptune.server import __version__ as server_api_version
class CheckApiVersion(object):
def __init__(self, utilities_api_service, client_version, config):
self._utilities_api_service = utilities_api_service
self._client_version = client_version
self._config = config
self._check_connection()
@property
def client_version(self):
return self._client_version
def _check_connection(self):
try:
version_info = self._utilities_api_service.get_version()
if version_info.version is None:
raise NeptuneConnectionFailedException(self._config.http_url)
self._check_version_compatibility(version_info.version)
except NeptuneServerResponseErrorException as exc:
# Old backends don't have an endpoint for obtaining version in root.
# Bare backend returns 404 in such a case, but a proxy redirects to /login (302).
if exc.status == APIErrorCodes.NOT_FOUND.value or\
exc.status == APIErrorCodes.MOVED.value:
raise InvalidApiVersionException(
self.client_version,
backend_version='unknown')
else:
raise
except NeptuneServerRequestFailedException:
raise NeptuneConnectionFailedException(self._config.http_url)
def _check_version_compatibility(self, backend_version):
client_api_version = self.extract_api_version(self.client_version)
backend_api_version = self.extract_api_version(backend_version)
if backend_api_version is None:
raise NeptuneConnectionFailedException(self._config.http_url)
elif client_api_version != backend_api_version:
raise InvalidApiVersionException(self.client_version, backend_version)
@staticmethod
def extract_api_version(version_string):
match_result = re.match('\\d+\\.\\d+', version_string)
if match_result:
return match_result.group(0)
else:
return None
@staticmethod
def for_service(utilities_api_service, config):
return CheckApiVersion(utilities_api_service, server_api_version, config)
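# Illustrative behaviour of extract_api_version (values are examples only):
#   CheckApiVersion.extract_api_version('1.4.2')  -> '1.4'
#   CheckApiVersion.extract_api_version('dev')    -> None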
|
from django.contrib import admin
from .models import Profile, Task
admin.site.register(Profile)
admin.site.register(Task)
|
from flask import render_template
from flask import Response
from flask import Flask
import cv2
app = Flask(__name__)
cap = cv2.VideoCapture(0)
def generate_frame():
while True:
        success, frame = cap.read()
        if not success:
            break
        else:
ret, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield(b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
'''
@app.route('/')
def index():
    return render_template('streaming2.html')
'''
@app.route('/video_feed')
def video_feed():
return Response(generate_frame(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
app.run(debug=True)
|
def test_rclone_tree3():
import urllib.request as urlreq
from io import StringIO as strio
class PyProtoHandler(urlreq.BaseHandler):
def python_open(self, req):
fullUrl = req.get_full_url()
return strio(fullUrl)
opener = urlreq.build_opener(PyProtoHandler())
urlreq.install_opener(opener)
print(urlreq.urlopen("python://something/random/file.txt").read())
print(len(urlreq.urlopen("http://example.com").read()))
|
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Bare-bones implementation of a cache that never stores anything.
This is used to enable compatibility with HTTP applications and stateless
bots where desired.
"""
from __future__ import annotations
__all__: typing.List[str] = ["StatelessCacheImpl"]
import typing
from hikari.api import cache
from hikari.internal import cache as cache_utilities
if typing.TYPE_CHECKING:
from hikari import channels
from hikari import emojis
from hikari import guilds
from hikari import invites
from hikari import messages
from hikari import presences
from hikari import snowflakes
from hikari import users
from hikari import voices
@typing.final
class StatelessCacheImpl(cache.MutableCache):
"""Stateless cache.
    A stateless cache implementation that implements dummy operations for
    each of the required attributes of a functional cache implementation.
    Getter methods will return `builtins.None` or an empty cache view, and
    mutating methods will raise `builtins.NotImplementedError` when invoked.
The only state that _is_ stored will be the bot user, as this is generally
useful information to always know about, and is required for some
functionality such as voice support.
"""
__slots__: typing.Sequence[str] = ("_app", "_me")
def __init__(self) -> None:
self._me: typing.Optional[users.OwnUser] = None
@staticmethod
def _no_cache() -> NotImplementedError:
return NotImplementedError("This application is stateless, cache operations are not implemented.")
def clear_emojis(self) -> cache.CacheView[snowflakes.Snowflake, emojis.KnownCustomEmoji]:
raise self._no_cache()
def clear_emojis_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, emojis.KnownCustomEmoji]:
raise self._no_cache()
def delete_emoji(self, emoji_id: snowflakes.Snowflake, /) -> typing.Optional[emojis.KnownCustomEmoji]:
raise self._no_cache()
def get_emoji(self, emoji_id: snowflakes.Snowflake, /) -> typing.Optional[emojis.KnownCustomEmoji]:
return None
def get_emojis_view(self) -> cache.CacheView[snowflakes.Snowflake, emojis.KnownCustomEmoji]:
return cache_utilities.EmptyCacheView()
def get_emojis_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, emojis.KnownCustomEmoji]:
return cache_utilities.EmptyCacheView()
def set_emoji(self, emoji: emojis.KnownCustomEmoji, /) -> None:
raise self._no_cache()
def update_emoji(
self, emoji: emojis.KnownCustomEmoji, /
) -> typing.Tuple[typing.Optional[emojis.KnownCustomEmoji], typing.Optional[emojis.KnownCustomEmoji]]:
raise self._no_cache()
def clear_guilds(self) -> cache.CacheView[snowflakes.Snowflake, guilds.GatewayGuild]:
raise self._no_cache()
def delete_guild(self, guild_id: snowflakes.Snowflake, /) -> typing.Optional[guilds.GatewayGuild]:
raise self._no_cache()
def get_guild(self, guild_id: snowflakes.Snowflake, /) -> typing.Optional[guilds.GatewayGuild]:
return None
def get_available_guild(self, guild_id: snowflakes.Snowflake, /) -> typing.Optional[guilds.GatewayGuild]:
return None
def get_unavailable_guild(self, guild_id: snowflakes.Snowflake) -> typing.Optional[guilds.GatewayGuild]:
return None
def get_available_guilds_view(self) -> cache.CacheView[snowflakes.Snowflake, guilds.GatewayGuild]:
return cache_utilities.EmptyCacheView()
def get_unavailable_guilds_view(self) -> cache.CacheView[snowflakes.Snowflake, guilds.GatewayGuild]:
return cache_utilities.EmptyCacheView()
def set_guild(self, guild: guilds.GatewayGuild, /) -> None:
raise self._no_cache()
def set_guild_availability(self, guild_id: snowflakes.Snowflake, is_available: bool, /) -> None:
raise self._no_cache()
def update_guild(
self, guild: guilds.GatewayGuild, /
) -> typing.Tuple[typing.Optional[guilds.GatewayGuild], typing.Optional[guilds.GatewayGuild]]:
raise self._no_cache()
def clear_guild_channels(self) -> cache.CacheView[snowflakes.Snowflake, channels.GuildChannel]:
raise self._no_cache()
def clear_guild_channels_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, channels.GuildChannel]:
raise self._no_cache()
def delete_guild_channel(self, channel_id: snowflakes.Snowflake, /) -> typing.Optional[channels.GuildChannel]:
raise self._no_cache()
def get_guild_channel(self, channel_id: snowflakes.Snowflake, /) -> typing.Optional[channels.GuildChannel]:
return None
def get_guild_channels_view(self) -> cache.CacheView[snowflakes.Snowflake, channels.GuildChannel]:
return cache_utilities.EmptyCacheView()
def get_guild_channels_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, channels.GuildChannel]:
return cache_utilities.EmptyCacheView()
def set_guild_channel(self, channel: channels.GuildChannel, /) -> None:
raise self._no_cache()
def update_guild_channel(
self, channel: channels.GuildChannel, /
) -> typing.Tuple[typing.Optional[channels.GuildChannel], typing.Optional[channels.GuildChannel]]:
raise self._no_cache()
def clear_invites(self) -> cache.CacheView[str, invites.InviteWithMetadata]:
raise self._no_cache()
def clear_invites_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[str, invites.InviteWithMetadata]:
raise self._no_cache()
def clear_invites_for_channel(
self, guild_id: snowflakes.Snowflake, channel_id: snowflakes.Snowflake, /
) -> cache.CacheView[str, invites.InviteWithMetadata]:
raise self._no_cache()
def delete_invite(self, code: str, /) -> typing.Optional[invites.InviteWithMetadata]:
raise self._no_cache()
def get_invite(self, code: str, /) -> typing.Optional[invites.InviteWithMetadata]:
return None
def get_invites_view(self) -> cache.CacheView[str, invites.InviteWithMetadata]:
return cache_utilities.EmptyCacheView()
def get_invites_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[str, invites.InviteWithMetadata]:
return cache_utilities.EmptyCacheView()
def get_invites_view_for_channel(
self, guild_id: snowflakes.Snowflake, channel_id: snowflakes.Snowflake, /
) -> cache.CacheView[str, invites.InviteWithMetadata]:
return cache_utilities.EmptyCacheView()
def set_invite(self, invite: invites.InviteWithMetadata, /) -> None:
raise self._no_cache()
def update_invite(
self, invite: invites.InviteWithMetadata, /
) -> typing.Tuple[typing.Optional[invites.InviteWithMetadata], typing.Optional[invites.InviteWithMetadata]]:
raise self._no_cache()
def delete_me(self) -> typing.Optional[users.OwnUser]:
cached_me = self._me
self._me = None
return cached_me
def get_me(self) -> typing.Optional[users.OwnUser]:
return self._me
def set_me(self, user: users.OwnUser, /) -> None:
self._me = user
def update_me(
self, user: users.OwnUser, /
) -> typing.Tuple[typing.Optional[users.OwnUser], typing.Optional[users.OwnUser]]:
cached_me = self.get_me()
self.set_me(user)
return cached_me, self.get_me()
def clear_members(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, guilds.Member]]:
raise self._no_cache()
def clear_members_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, guilds.Member]:
raise self._no_cache()
def delete_member(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[guilds.Member]:
raise self._no_cache()
def get_member(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[guilds.Member]:
return None
def get_members_view(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, guilds.Member]]:
return cache_utilities.EmptyCacheView()
def get_members_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, guilds.Member]:
return cache_utilities.EmptyCacheView()
def set_member(self, member: guilds.Member, /) -> None:
raise self._no_cache()
def update_member(
self, member: guilds.Member, /
) -> typing.Tuple[typing.Optional[guilds.Member], typing.Optional[guilds.Member]]:
raise self._no_cache()
def clear_presences(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, presences.MemberPresence]]:
raise self._no_cache()
def clear_presences_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, presences.MemberPresence]:
raise self._no_cache()
def delete_presence(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[presences.MemberPresence]:
raise self._no_cache()
def get_presence(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[presences.MemberPresence]:
return None
def get_presences_view(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, presences.MemberPresence]]:
return cache_utilities.EmptyCacheView()
def get_presences_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, presences.MemberPresence]:
return cache_utilities.EmptyCacheView()
def set_presence(self, presence: presences.MemberPresence, /) -> None:
raise self._no_cache()
def update_presence(
self, presence: presences.MemberPresence, /
) -> typing.Tuple[typing.Optional[presences.MemberPresence], typing.Optional[presences.MemberPresence]]:
raise self._no_cache()
def clear_roles(self) -> cache.CacheView[snowflakes.Snowflake, guilds.Role]:
raise self._no_cache()
def clear_roles_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, guilds.Role]:
raise self._no_cache()
def delete_role(self, role_id: snowflakes.Snowflake, /) -> typing.Optional[guilds.Role]:
raise self._no_cache()
def get_role(self, role_id: snowflakes.Snowflake, /) -> typing.Optional[guilds.Role]:
return None
def get_roles_view(self) -> cache.CacheView[snowflakes.Snowflake, guilds.Role]:
return cache_utilities.EmptyCacheView()
def get_roles_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, guilds.Role]:
return cache_utilities.EmptyCacheView()
def set_role(self, role: guilds.Role, /) -> None:
raise self._no_cache()
def update_role(
self, role: guilds.Role, /
) -> typing.Tuple[typing.Optional[guilds.Role], typing.Optional[guilds.Role]]:
raise self._no_cache()
def clear_users(self) -> cache.CacheView[snowflakes.Snowflake, users.User]:
raise self._no_cache()
def delete_user(self, user_id: snowflakes.Snowflake, /) -> typing.Optional[users.User]:
raise self._no_cache()
def get_user(self, user_id: snowflakes.Snowflake, /) -> typing.Optional[users.User]:
return None
def get_users_view(self) -> cache.CacheView[snowflakes.Snowflake, users.User]:
return cache_utilities.EmptyCacheView()
def set_user(self, user: users.User, /) -> None:
raise self._no_cache()
def update_user(
self, user: users.User, /
) -> typing.Tuple[typing.Optional[users.User], typing.Optional[users.User]]:
raise self._no_cache()
def clear_voice_states(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, voices.VoiceState]]:
raise self._no_cache()
def clear_voice_states_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, voices.VoiceState]:
raise self._no_cache()
def clear_voice_states_for_channel(
self, guild_id: snowflakes.Snowflake, channel_id: snowflakes.Snowflake
) -> cache.CacheView[snowflakes.Snowflake, voices.VoiceState]:
raise self._no_cache()
def delete_voice_state(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[voices.VoiceState]:
raise self._no_cache()
def get_voice_state(
self, guild_id: snowflakes.Snowflake, user_id: snowflakes.Snowflake, /
) -> typing.Optional[voices.VoiceState]:
return None
def get_voice_states_view(
self,
) -> cache.CacheView[snowflakes.Snowflake, cache.CacheView[snowflakes.Snowflake, voices.VoiceState]]:
return cache_utilities.EmptyCacheView()
def get_voice_states_view_for_channel(
self, guild_id: snowflakes.Snowflake, channel_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, voices.VoiceState]:
return cache_utilities.EmptyCacheView()
def get_voice_states_view_for_guild(
self, guild_id: snowflakes.Snowflake, /
) -> cache.CacheView[snowflakes.Snowflake, voices.VoiceState]:
return cache_utilities.EmptyCacheView()
def set_voice_state(self, voice_state: voices.VoiceState, /) -> None:
raise self._no_cache()
def update_voice_state(
self, voice_state: voices.VoiceState, /
) -> typing.Tuple[typing.Optional[voices.VoiceState], typing.Optional[voices.VoiceState]]:
raise self._no_cache()
def delete_message(
self, message_id: snowflakes.Snowflake
) -> None:
raise self._no_cache()
def delete_messages(
self, message_ids: typing.Sequence[snowflakes.Snowflake]
) -> None:
raise self._no_cache()
def get_message(
self, message_id: snowflakes.Snowflake
) -> typing.Optional[messages.PartialMessage]:
return None
def set_message(
self, message: messages.PartialMessage
) -> None:
raise self._no_cache()
def update_message(
self, message: messages.PartialMessage
) -> typing.Tuple[typing.Optional[messages.PartialMessage], typing.Optional[messages.PartialMessage]]:
raise self._no_cache()
|
from __future__ import unicode_literals
# TODO add tests for all of these
COMPARISON_FUNCS = {
'EQ': lambda item_value, test_value: item_value == test_value,
'NE': lambda item_value, test_value: item_value != test_value,
'LE': lambda item_value, test_value: item_value <= test_value,
'LT': lambda item_value, test_value: item_value < test_value,
'GE': lambda item_value, test_value: item_value >= test_value,
'GT': lambda item_value, test_value: item_value > test_value,
'NULL': lambda item_value: item_value is None,
'NOT_NULL': lambda item_value: item_value is not None,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, *test_values: item_value in test_values,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
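def _example_comparisons():
    # Illustrative only (not part of the module): each operator name maps to a
    # plain Python predicate over the item value and the test value(s).
    assert get_comparison_func('BETWEEN')(5, 1, 10)
    assert get_comparison_func('BEGINS_WITH')('moto-test', 'moto')
    assert not get_comparison_func('CONTAINS')([1, 2, 3], 4)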
|
from datetime import date
from django.db import models
from hth.core.models import PublishedModel, PublishedQuerySet
class Venue(models.Model):
"""
Stores a club, bar, festival, etc.
"""
name = models.CharField(max_length=200)
city = models.CharField(max_length=200)
website = models.URLField(blank=True)
address = models.CharField(blank=True, max_length=200)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
ordering = ['name', 'city']
def __str__(self):
return '{}, {}'.format(self.name, self.city)
class GigQuerySet(PublishedQuerySet):
"""
Provides additional filters for ``Gig``.
"""
def published(self):
"""
        Returns a ``QuerySet`` of published objects with their related ``Venue`` objects.
"""
return super().published().select_related('venue')
def upcoming(self):
"""
        Returns a ``QuerySet`` of future ``Gig`` objects in ascending date order.
"""
return self.filter(date__gte=date.today()).reverse()
def past(self):
"""
        Returns a ``QuerySet`` of past ``Gig`` objects in descending date order.
"""
return self.filter(date__lt=date.today())
class Gig(PublishedModel):
"""
Stores a show, aka concert.
"""
date = models.DateField()
venue = models.ForeignKey(Venue)
description = models.TextField(
blank=True, help_text="Type of gig, band line-up, video links, etc.")
details = models.TextField(
blank=True, help_text="Start time, cost, ticket and venue links, etc.")
objects = GigQuerySet.as_manager()
class Meta:
ordering = ['-date']
def __str__(self):
return '{}, {}'.format(self.date, self.venue)
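# Illustrative query sketch (assumes a configured Django project; shown only to
# document the intended chaining of the custom queryset methods):
#
#   Gig.objects.published().upcoming()  # future gigs, oldest first
#   Gig.objects.published().past()      # past gigs, newest first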
|
import datetime
import os
import string, random
import re
from art import *
from colorama import Fore
from timeit import default_timer as timer
#opening screen
ascii_art = text2art(text='Password Generator') #declare ascii art
os.system('title Password Generator') #set the console window title (Windows)
os.system('cls') #clear the screen
print(f"{Fore.MAGENTA}{ascii_art}{Fore.RESET}") #show ascii art
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
while True:
try:
amount = int(input('Amount of passwords to generate: '))
option = str(input('How complicated would you like the password to be?(1-3): ').lower())
characters = int(input('Amount of characters: '))
passwords = []
start = timer()
for i in range(amount):
try:
if option == '1' or option == 'option 1':
result_str = ''.join(random.choice(string.ascii_letters) for i in range(characters))
print('Password: ' + result_str)
passwords.append(result_str)
elif option == '2' or option == 'option 2':
allchars = string.ascii_letters + string.digits
result_str = ''.join(random.choice(allchars) for i in range(characters))
print('Password: ' + result_str)
passwords.append(result_str)
elif option == '3' or option == 'option 3':
allchars = string.ascii_letters + string.digits + string.punctuation
result_str = ''.join(random.choice(allchars) for i in range(characters))
print('Password: ' + result_str)
passwords.append(result_str)
else:
raise Exception('Invalid response.')
pass
except Exception as e:
print(f"Error: {e}")
continue
end = timer()
print(f"Took {end-start} seconds to generate {amount:,} passwords.")
    except ValueError:
print('Invalid option, try again.')
continue
try:
option = str(input('Would you like to save the passwords?(Y/N): ').lower())
if option == 'y':
try:
path = str(input('The path to save the password(s) to: '))
if amount < 6:
for i in range(amount):
email = str(input('Email: '))
if(re.fullmatch(regex, email)):
name = str(input('Name/What is the password for: '))
with open(f"{path}\{name}.txt", 'w') as file:
file.write(f"Email: {email} \n Password: {passwords[i]}")
print(f"Password for {name} saved.")
file.close()
else:
print('Invalid email.')
else:
start = timer()
with open(f"{path}\passwords.txt", 'w') as file:
for element in passwords:
file.write(f"{element} \n")
file.close()
end = timer()
print('Passwords saved.')
print(f"Took {end-start} seconds to save passwords to {path}\passwords.txt")
except Exception as e:
print(f'Error: {e}')
pass
elif option == 'n':
print('Passwords not saved.')
continue
else:
raise Exception('Invalid response.')
pass
except Exception as e:
print(f"Error: {e}")
pass
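# Suggestion (not part of the original script): for real passwords the
# standard-library `secrets` module is preferable to `random`; a minimal sketch
# of the same idea:
#
#   import secrets
#   def generate_password(length, alphabet=string.ascii_letters + string.digits):
#       return ''.join(secrets.choice(alphabet) for _ in range(length))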
|
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
name = 'users_app'
|
from .resource import Resource
from .account import Account
from .database import Database
from .document import Document
from .design import Design
from .attachment import Attachment
from .index import Index
import warnings
message = """
*********************************************
`cloudant` Deprecation Warning
You are using version 0.5.10 of this library,
which is now deprecated. It will be replaced
with version 2.0.0 in early 2016. This will
introduce breaking changes. Please upgrade as
soon as possible. Find out more at
https://github.com/cloudant/python-cloudant
*********************************************
"""
#we don't use DeprecationWarning because that message is ignored by default
warnings.warn(message)
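# Consumers who want to silence this notice can filter warnings raised from
# this package explicitly (illustrative sketch, not part of the package):
#
#   import warnings
#   warnings.filterwarnings('ignore', category=UserWarning, module='cloudant')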
|
SECRET_KEY = 'david_secret'
APP_NAME = 'Moby Dock'
DEBUG = True
TESTING = False
SQLALCHEMY_DATABASE_URI = 'postgresql://mobydock:12345678@postgres:5432/mobydock'
REDIS_URL = 'redis://redis:6379/0'
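# Typical usage sketch (the import path 'config' is an assumption about how
# this settings module is named in the project):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config')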
|
def test_import():
import manim
|
import json
import shutil
from mido import MidiFile
import mido
import midi
import os
import csv
from math import sqrt
#variables to be adjusted
spacing_difficulty = 0.25
spacing_cap = 1.8
csvfile = open('output.csv', 'w', newline='', encoding="utf8")
fieldnames = ['Map Title', 'Difficulty', 'Difficulty Rating', "BPM", "Author"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'Map Title': "Song Name", 'Difficulty': "Difficulty", 'Difficulty Rating': "Difficulty Rating", 'Author': "Author", "BPM": "BPM"})
def calculateAudicaMap(filename):
filename = filename
cueFiles = []
#reading zipfile
from zipfile import ZipFile
audicafile = ZipFile(filename, mode='r')
#print(audicafile.namelist())
#extract files
for item in audicafile.namelist():
if item.find('.mid') > 0:
audicafile.extract(item, path="./temp/")
midiname = item
#get desc
for item in audicafile.namelist():
if item.find('.desc') > 0:
audicafile.extract(item, path="./temp/")
with open ("./temp/" + item) as desc:
mapdesc = json.load(desc)
MapTitle = mapdesc["artist"] + " - " + mapdesc["title"].split("<size")[0]
author = mapdesc.get("author", "HMX")
print("Map: " + MapTitle)
print("Author: " + author)
#get BPM
mid = MidiFile("./temp/" + midiname)
tempo = -1
for i, track in enumerate(mid.tracks):
for msg in track:
if msg.type == 'set_tempo':
tempo = msg.tempo
#empty tempo handling
if tempo == -1:
tempo = mapdesc["tempo"]
#get tempos
def get_tempos_from_midi(midi_file):
pattern = midi.read_midifile("./temp/" + midiname)
tick = 0
temposList = []
for track in pattern:
for event in track:
if type(event) is midi.SetTempoEvent:
tick += event.tick
temposList.append({
"tick": tick,
"tempo": event.get_bpm()
})
#empty tempo handling
if not temposList:
temposList.append({
"tick": 0,
"tempo": tempo
})
# for i, track in enumerate(pattern.tracks):
# for msg in track:
# if msg.type == 'set_tempo':
# tempos.append({
# "tick": msg.tick,
# "tempo": msg.tempo
# })
return temposList
print("Audica BPM: " + str(round(mido.tempo2bpm(tempo), 2)) + "\n")
def getDifficultyRating(difficultyName):
with open ("./temp/" + difficultyName) as map:
audicamap = json.load(map)
cues = audicamap["cues"]
midiForTempo = MidiFile("./temp/" + midiname)
tempos = get_tempos_from_midi(midiForTempo)
# print("previous last cue: " + str(cues[-1]["tick"]) + " at " + str(tempo))
# mapLength = cues[-1]["tick"] / 480 * tempo / 1000000
objectCount = 0
calculatedObjects = 0
leftHand = []
rightHand = []
anyHand = []
finalCues = []
def getTrueCoordinates(cue):
pitch = cue["pitch"]
x = pitch % 12
y = int(pitch / 12)
cue["trueX"] = x + cue["gridOffset"]["x"]
cue["trueY"] = y + cue["gridOffset"]["y"]
for item in cues:
getTrueCoordinates(item)
def getObjectDifficulty(object):
difficulty = 0
cueSpacing = object.get("spacing", 0) * spacing_difficulty
# cap spacing difficulty weight
if ( cueSpacing > spacing_cap):
print("beeg spacing alert beeg spacing alert: " + str(cueSpacing))
cueSpacing = spacing_cap
if object["behavior"] == 0: #normal circle
difficulty = 1 + cueSpacing
elif object["behavior"] == 1: #vertical object
difficulty = 1.2 + cueSpacing
elif object["behavior"] == 2: #horizontal object
difficulty = 1.3 + cueSpacing
elif object["behavior"] == 3: #sustain
difficulty = 1 + cueSpacing
elif object["behavior"] == 4: #chain start
difficulty = 1.2 + cueSpacing
elif object["behavior"] == 5: #chain node
difficulty = 0.2
elif object["behavior"] == 6: #melee
difficulty = 0.6
return difficulty
#divide the hand types into their own lists
for item in cues:
if item["handType"] == 1:
rightHand.append(item)
elif item["handType"] == 2:
leftHand.append(item)
else:
anyHand.append(item)
for x,y in zip(leftHand[::],leftHand[1::]):
# print(abs(y["gridOffset"]["x"] - x["gridOffset"]["x"]))
# print(y["tick"], x["tick"])
y["spacing"] = sqrt( (y["trueX"] - x["trueX"])**2 + (y["trueY"] - x["trueY"])**2 )
for x,y in zip(rightHand[::],rightHand[1::]):
# print(abs(y["gridOffset"]["x"] - x["gridOffset"]["x"]))
# print(y["tick"], x["tick"])
y["spacing"] = sqrt( (y["trueX"] - x["trueX"])**2 + (y["trueY"] - x["trueY"])**2 )
finalCues = leftHand + rightHand + anyHand
finalCuesSorted = sorted(finalCues, key=lambda k: k['tick'])
'''
with open("debug" + difficultyName + '.cues', 'w') as outfile:
json.dump(cues, outfile, indent=4)
'''
for item in finalCuesSorted:
if item["behavior"] != 5:
objectCount += 1
calculatedObjects += getObjectDifficulty(item)
#function to calculate time between given cues
def get_delta_time(cue):
#tempos = self.tempos
if len(tempos) > 1:
# print(str(cue["tick"]))
tick = cue["tick"]
time = 0
last_tempo = 0
last_tick = 0
for tempo in tempos:
bpm = tempo["tempo"]
t = tempo["tick"]
if t != 0:
if tick >= t:
tick_time = 60000 / (last_tempo * 480)
tick_count = t - last_tick
time = time + (tick_time * tick_count)
last_tempo = bpm
last_tick = t
else:
break
else:
last_tempo = bpm
difference = tick - last_tick
if difference != 0:
tick_time = 60000 / (last_tempo * 480)
time = time + (tick_time * difference)
return time
else:
# print("ELSE get_delta_time tempos: " + str(mido.bpm2tempo(tempos[0]["tempo"])))
# print("cue tick: " + str(cue["tick"]))
return cue["tick"] / 480 * mido.bpm2tempo(tempos[0]["tempo"]) /1000
#/ 1000000
#get first and last cues
firstCueTime = round(get_delta_time(finalCuesSorted[0]), 2)
lastCueTime = round(get_delta_time(finalCuesSorted[-1]), 2)
# print("1st: " + str(firstCueTime))
# print("Last: " + str(lastCueTime))
# print("prev mapLength: " + str(mapLength))
mapLength = (lastCueTime - firstCueTime) / 1000
# print("cur mapLength: " + str(round(mapLength, 2)))
NPS = round((objectCount / mapLength), 2)
StarRating = str( round((calculatedObjects / mapLength), 2))
diffname = difficultyName.capitalize().replace(".cues", "")
print("Difficulty: " + diffname)
print( "Object count: " + str(objectCount) )
print( "NPS: " + str( NPS ) )
print( "Weighted objects: " + str( round(calculatedObjects, 2 )) )
print( "Difficulty Rating: " + StarRating )
print("")
writer.writerow({'Map Title': MapTitle, 'Difficulty': diffname, 'Difficulty Rating': StarRating, 'Author': author, "BPM": round(mido.tempo2bpm(tempo), 2)})
return diffname, NPS, StarRating
for item in audicafile.namelist():
if item.find('.cues') > 0:
audicafile.extract(item, path="./temp/")
cueFiles.append(item)
diffname,nps,StarRating = getDifficultyRating(item)
shutil.rmtree("./temp/")
for files in os.listdir("./maps/"):
calculateAudicaMap("./maps/" + files)
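# Usage note (inferred from the paths above, not stated elsewhere): the script
# expects .audica archives inside ./maps/ next to this file and appends one CSV
# row per difficulty to output.csv, e.g.
#
#   maps/
#     Artist - Song.audica
#     Another Artist - Another Song.audica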
|
# -*- coding: utf-8 -*-
# Copyright 2018 Ross Jacobs All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to scrape elements from / input text into an MS page.
This module is half-implemented for demonstration purposes.
"""
from . import page_utils
def ms_get_management_vlan(self):
"""Return the management vlan.
Location: Switches > Switch Settings > VLAN Configuration
Sample HTML:
<input id="node_group_management_vlan" name=
"node_group[management_vlan]" type="text" value="1">
Returns:
(string): The management VLAN for this switch network.
"""
self.open_route('/configure/switch_settings', "Switch")
textarea_value = page_utils.get_input_var_value(
self.get_page(),
var_id='node_group_management_vlan')
return textarea_value
def ms_get_rstp_enabled(self):
"""Return the bool of whether RSTP is enabled.
Location: Switches > Switch Settings > VLAN Configuration
Sample HTML:
<select id="node_group_use_stp" name="node_group[use_stp]">
<option value="true" selected="selected">Enable RSTP</option>
<option value="false">Disable RSTP</option></select>
Returns:
(bool): Whether RSTP is enabled for this switch network.
"""
self.open_route('/configure/switch_settings', "Switch")
dropdown_value = page_utils.get_dropdown_value(
self.get_page(),
var_id='node_group_use_stp')
return dropdown_value == 'Enable RSTP'
|
"""Support for Crow IP Module-based alarm control panel"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_TIMEOUT, CONF_HOST
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
_LOGGER = logging.getLogger(__name__)
DOMAIN = "crowipmodule"
DATA_CRW = "crowipmodule"
CONF_CODE = "code"
CONF_CROW_KEEPALIVE = "keepalive_interval"
CONF_CROW_PORT = "port"
CONF_AREANAME = "name"
CONF_AREAS = "areas"
CONF_ZONENAME = "name"
CONF_ZONES = "zones"
CONF_ZONETYPE = "type"
CONF_OUTPUTS = "outputs"
CONF_OUTPUTNAME = "name"
DEFAULT_PORT = 5002
DEFAULT_KEEPALIVE = 60
DEFAULT_ZONETYPE = "opening"
DEFAULT_TIMEOUT = 10
SIGNAL_ZONE_UPDATE = "crowipmodule.zones_updated"
SIGNAL_AREA_UPDATE = "crowipmodule.areas_updated"
SIGNAL_SYSTEM_UPDATE = "crowipmodule.system_updated"
SIGNAL_OUTPUT_UPDATE = "crowipmodule.output_updated"
SIGNAL_KEYPAD_UPDATE = "crowipmodule.keypad_updated"
OUTPUT_SCHEMA = vol.Schema(
{
vol.Required(CONF_OUTPUTNAME): cv.string,
}
)
ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONENAME): cv.string,
vol.Optional(CONF_ZONETYPE, default=DEFAULT_ZONETYPE): cv.string,
}
)
AREA_SCHEMA = vol.Schema(
{
vol.Required(CONF_AREANAME): cv.string,
vol.Optional(CONF_CODE, default=''): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_AREAS): {vol.Coerce(int): AREA_SCHEMA},
vol.Optional(CONF_OUTPUTS): {vol.Coerce(int): OUTPUT_SCHEMA},
vol.Optional(CONF_CROW_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_CROW_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All(
vol.Coerce(int), vol.Range(min=15)
),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up for Crow IP Module."""
from pycrowipmodule import CrowIPAlarmPanel
conf = config.get(DOMAIN)
host = conf.get(CONF_HOST)
code = '0000'
port = conf.get(CONF_CROW_PORT)
keep_alive = conf.get(CONF_CROW_KEEPALIVE)
zones = conf.get(CONF_ZONES)
areas = conf.get(CONF_AREAS)
outputs = conf.get(CONF_OUTPUTS)
connection_timeout = conf.get(CONF_TIMEOUT)
sync_connect = asyncio.Future()
controller = CrowIPAlarmPanel(
host,
port,
code,
keep_alive,
hass.loop,
connection_timeout,
)
hass.data[DATA_CRW] = controller
@callback
def connection_fail_callback(data):
"""Network failure callback."""
_LOGGER.error("Could not establish a connection with the Crow Ip Module")
if not sync_connect.done():
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_crowipmodule)
            sync_connect.set_result(False)
@callback
def connected_callback(data):
"""Handle a successful connection."""
_LOGGER.info("Established a connection with the Crow Ip Module")
if not sync_connect.done():
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_crowipmodule)
sync_connect.set_result(True)
@callback
def zones_updated_callback(data):
"""Handle zone updates."""
_LOGGER.debug("Crow Ip Module sent a zone update event. Updating zones...")
async_dispatcher_send(hass, SIGNAL_ZONE_UPDATE, data)
@callback
def areas_updated_callback(data):
"""Handle area changes thrown by crow (including alarms)."""
_LOGGER.debug("The Crow Ip Module sent an area update event. Updating areas...")
async_dispatcher_send(hass, SIGNAL_AREA_UPDATE, data)
@callback
def system_updated_callback(data):
        """Handle system updates."""
_LOGGER.debug('Crow Ip Module sent a system update event. Updating system...')
async_dispatcher_send(hass, SIGNAL_SYSTEM_UPDATE, data)
@callback
def output_updated_callback(data):
"""Handle output updates."""
_LOGGER.debug("Crow Ip Module sent an output update event. Updating output...")
async_dispatcher_send(hass, SIGNAL_OUTPUT_UPDATE, data)
@callback
def stop_crowipmodule(event):
"""Shutdown Crow IP Module connection and thread on exit."""
_LOGGER.info("Shutting down CrowIpModule")
controller.stop()
controller.callback_zone_state_change = zones_updated_callback
controller.callback_area_state_change = areas_updated_callback
controller.callback_system_state_change = system_updated_callback
controller.callback_output_state_change = output_updated_callback
controller.callback_connected = connected_callback
controller.callback_login_timeout = connection_fail_callback
_LOGGER.info("Start CrowIpModule.")
controller.start()
result = await sync_connect
if not result:
return False
# Load sub-components for Crow Ip Module
if areas:
hass.async_create_task(
async_load_platform(
hass,
"alarm_control_panel",
"crowipmodule",
{CONF_AREAS: areas},
config,
)
)
hass.async_create_task(
async_load_platform(
hass,
"sensor",
"crowipmodule",
{CONF_AREAS: areas},
config,
)
)
if zones:
hass.async_create_task(
async_load_platform(
hass,
"binary_sensor",
"crowipmodule",
{CONF_ZONES: zones},
config,
)
)
hass.async_create_task(
async_load_platform(
hass,
"switch",
"crowipmodule",
{CONF_OUTPUTS: outputs},
config,
)
)
return True
class CrowIPModuleDevice(Entity):
"""Representation of an Crow IP Module."""
def __init__(self, name, info, controller):
"""Initialize the device."""
self._controller = controller
self._info = info
self._name = name
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
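# Illustrative configuration.yaml sketch matching CONFIG_SCHEMA above (host,
# names and numbering are placeholders, not defaults shipped with this code):
#
#   crowipmodule:
#     host: 192.168.1.50
#     port: 5002
#     keepalive_interval: 60
#     areas:
#       1:
#         name: House
#         code: "1234"
#     zones:
#       1:
#         name: Front Door
#         type: opening
#     outputs:
#       1:
#         name: Garage Door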
|
from wod_board import seed
from wod_board.models import equipment
from wod_board.models import movement
from wod_board.models import unit
from wod_board.models import user
from wod_board.models import w_type
def test_seed(db):
seed.seed_db()
assert db.query(user.User).count() > 0
assert db.query(unit.Unit).count() > 0
assert db.query(equipment.Equipment).count() > 0
assert db.query(movement.Movement).count() > 0
assert db.query(w_type.WodType).count() > 0
assert db.query(user.User).filter(user.User.username == "admin-bar").first()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
import netaddr
from sysinv.common import constants
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
class InvalidProfileData(Exception):
pass
class Network(object):
def __init__(self, node, networkType):
self.networkType = networkType
self.providerNetworks = []
providerNetworksNode = node.find('providerNetworks')
if providerNetworksNode:
for pnetNode in providerNetworksNode.findall('providerNetwork'):
pnetName = pnetNode.get('name')
self.addProviderNetwork(pnetName)
def addProviderNetwork(self, pnet):
if pnet not in self.providerNetworks:
self.providerNetworks.append(pnet)
# ignore if provider network is duplicated within one interface
def validate(self):
if len(self.providerNetworks) == 0:
# caller will do the translation
raise InvalidProfileData("At least one provider network must be selected.")
class DataclassNetwork(Network):
def __init__(self, node):
super(DataclassNetwork, self).__init__(node, constants.NETWORK_TYPE_DATA)
self.ipv4Mode = DataclassNetwork.getIpMode(node, "ipv4")
self.ipv6Mode = DataclassNetwork.getIpMode(node, "ipv6")
self.routes = DataclassNetwork.getRoutes(node)
@staticmethod
def getRoutes(node):
routesNode = node.find('routes')
if routesNode is None:
return []
routes = []
for routeNode in routesNode.findall('route'):
route = {}
route['metric'] = int(routeNode.get('metric'))
network = routeNode.get('network')
gateway = routeNode.get('gateway')
try:
addr = netaddr.IPAddress(gateway)
except netaddr.core.AddrFormatError:
raise InvalidProfileData(_('%s is not a valid IP address') % gateway)
try:
net = netaddr.IPNetwork(network)
except netaddr.core.AddrFormatError:
raise InvalidProfileData(_('%s is not a valid network') % network)
if addr.format() != gateway:
raise InvalidProfileData(_('%s is not a valid IP address') % gateway)
if net.version != addr.version:
raise InvalidProfileData(_('network "%s" and gateway "%s" must be the same version.') %
(network, gateway))
route['network'] = net.network.format()
route['prefix'] = net.prefixlen
route['gateway'] = gateway
route['family'] = net.version
routes.append(route)
return routes
@staticmethod
def getIpMode(node, name):
modeNode = node.find(name)
if modeNode is None:
raise InvalidProfileData(_('%s is required for a datanetwork') % name)
mode = modeNode.get('mode')
pool = None
if mode == 'pool':
poolNode = modeNode.find('pool')
if poolNode is None:
raise InvalidProfileData(_('A pool is required for a %s defined as "pool"') % name)
pool = poolNode.get('name')
return {'mode': mode, 'pool': pool}
class ExternalNetwork(object):
def __init__(self, node, networktype):
self.networkType = networktype
def validate(self):
pass
class PciPassthrough(Network):
def __init__(self, node):
super(PciPassthrough, self).__init__(node, constants.NETWORK_TYPE_PCI_PASSTHROUGH)
class PciSriov(Network):
def __init__(self, node):
super(PciSriov, self).__init__(node, constants.NETWORK_TYPE_PCI_SRIOV)
self.virtualFunctions = int(node.get('virtualFunctions'))
self.virtualFunctionDriver = node.get('virtualFunctionDriver')
class Interface(object):
def __init__(self, ifNode):
self.providerNetworks = []
self.networks = []
self.name = ifNode.get('ifName')
self.mtu = ifNode.get('mtu')
self.ipv4Mode = {'mode': None, 'pool': None}
self.ipv6Mode = {'mode': None, 'pool': None}
self.routes = []
self.virtualFunctions = 0
self.virtualFunctionDriver = None
networksNode = ifNode.find('networks')
if networksNode is not None:
for netNode in networksNode:
self.addNetwork(netNode)
def getNetworkMap(self):
return {}
def addNetwork(self, node):
tag = node.tag
networkMap = self.getNetworkMap()
if tag in networkMap:
network = networkMap[tag](node)
self.networks.append(network)
if network.networkType == constants.NETWORK_TYPE_DATA:
self.ipv4Mode = network.ipv4Mode
self.ipv6Mode = network.ipv6Mode
self.routes = network.routes
elif network.networkType == constants.NETWORK_TYPE_PCI_SRIOV:
self.virtualFunctions = network.virtualFunctions
self.virtualFunctionDriver = network.virtualFunctionDriver
if isinstance(network, Network):
self.providerNetworks = network.providerNetworks
else:
raise InvalidProfileData(_('network type (%s) not recognizable') % tag)
def validate(self):
# raise InvalidProfileData exception with detail msg
numberOfNetworks = len(self.networks)
if numberOfNetworks > 2:
raise InvalidProfileData(_('Too many network types selected for the interface.'))
# when change, make sure modify the displayText as well
combineTypes = [constants.NETWORK_TYPE_MGMT, constants.NETWORK_TYPE_CLUSTER_HOST]
displayText = _('Only mgmt and cluster-host network types can be combined on a single interface')
if numberOfNetworks == 2:
if self.networks[0].networkType not in combineTypes or \
self.networks[1].networkType not in combineTypes:
raise InvalidProfileData(displayText)
if self.networks[0].networkType == self.networks[1].networkType:
raise InvalidProfileData(_('Interface can not combine with 2 networks with the same type.'))
try:
for network in self.networks:
network.validate()
except InvalidProfileData as e:
raise InvalidProfileData(_(e.message + ' Interface: %s') % self.name)
def getNetworks(self):
pnets = ''
networkTypes = ''
hasNT = False
for network in self.networks:
if network.networkType is None:
continue
hasNT = True
if networkTypes:
networkTypes += ','
networkTypes = networkTypes + network.networkType
if hasattr(network, 'providerNetworks'):
# there should be only one network has providerNetwork
for pnet in network.providerNetworks:
if pnets:
pnets += ','
pnets = pnets + pnet
if not hasNT:
networkTypes = None
pnets = None
return networkTypes, pnets
class EthInterface(Interface):
def __init__(self, ifNode):
super(EthInterface, self).__init__(ifNode)
self.port, self.pciAddress, self.pclass, self.pdevice = self.getPort(ifNode)
def getPort(self, ifNode):
portNode = ifNode.find('port')
if portNode is None:
raise InvalidProfileData(_('Ethernet interface %s requires an Ethernet port ') %
ifNode.get('ifName'))
pciAddress = ''
tmp = portNode.get('pciAddress')
try:
pciAddress = EthInterface.formatPciAddress(tmp)
except InvalidProfileData as exc:
raise InvalidProfileData(exc.message + _('Interface %s, pciAddress %s') % (ifNode.get('ifName'), tmp))
pclass = portNode.get('class')
if pclass:
pclass = pclass.strip()
pdevice = portNode.get('device')
if pdevice:
pdevice = pdevice.strip()
return portNode.get('name'), pciAddress, pclass, pdevice
@staticmethod
def formatPciAddress(value):
# To parse a [X]:[X]:[X].[X] formatted pci address into [04x]:[02x]:[02x].[01x] pci address format
if value:
section_list1 = value.split(':')
else:
return ''
if len(section_list1) != 3:
raise InvalidProfileData(_('pciAddress is not well formatted.'))
section_list2 = section_list1[2].split('.')
if len(section_list2) != 2:
raise InvalidProfileData(_('pciAddress is not well formatted.'))
try:
sec1 = int(section_list1[0], 16)
sec2 = int(section_list1[1], 16)
sec3 = int(section_list2[0], 16)
sec4 = int(section_list2[1], 16)
except (TypeError, ValueError):
raise InvalidProfileData(_('pciAddress is not well formatted.'))
result = '{0:04x}:{1:02x}:{2:02x}.{3:01x}'.format(sec1, sec2, sec3, sec4)
return result
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT),
'pciPassthrough': lambda node: PciPassthrough(node),
'pciSriov': lambda node: PciSriov(node)
}
class AeInterface(Interface):
def __init__(self, ifNode):
super(AeInterface, self).__init__(ifNode)
self.usesIf = []
aeModeNode = ifNode.find('aeMode') # aeMode is mandatory required by schema
node = aeModeNode[0] # it is mandatory required by schema
if node.tag == 'activeStandby':
self.aeMode = 'activeStandby'
self.txPolicy = None
elif node.tag == 'balanced':
self.aeMode = 'balanced'
self.txPolicy = node.get('txPolicy')
elif node.tag == 'ieee802.3ad':
self.aeMode = '802.3ad'
self.txPolicy = node.get('txPolicy')
node = ifNode.find('interfaces')
if node:
for usesIfNode in node.findall('interface'):
self.addUsesIf(usesIfNode.get('name'))
def addUsesIf(self, ifName):
if not ifName:
raise InvalidProfileData(_('Interface name value cannot be empty.'))
if ifName == self.name:
raise InvalidProfileData(_('Aggregrated ethernet interface (%s) cannot use itself.') % self.name)
if ifName not in self.usesIf:
self.usesIf.append(ifName)
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
}
def validateWithIfNames(self, allInterfaceNames):
# raise InvalidProfileData exception if invalid
if len(self.usesIf) == 0:
msg = _('Aggregrated ethernet interface (%s) should have at least one interface.') % self.name
raise InvalidProfileData(msg)
for usesIfName in self.usesIf:
if usesIfName not in allInterfaceNames:
msg = _('Aggregrated ethernet interface (%s) uses a undeclared interface (%s)') % \
(self.name, usesIfName)
raise InvalidProfileData(msg)
super(AeInterface, self).validate()
class VlanInterface(Interface):
def __init__(self, ifNode):
super(VlanInterface, self).__init__(ifNode)
self.vlanId = int(ifNode.get('vlanId'))
usesIf = ifNode.get('interface')
if not usesIf:
raise InvalidProfileData(_('<usesIf> value cannot be empty.'))
if usesIf == self.name:
raise InvalidProfileData(_('vlan interface (%s) cannot use itself.') % self.name)
self.usesIfName = usesIf
self.usesIf = [usesIf]
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'clusterhostNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_CLUSTER_HOST),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
}
@staticmethod
def isEthInterface(ifName, ethIfMap):
return ifName in ethIfMap
def validateWithIfNames(self, allInterfaceNames, aeIfMap, vlanIfMap, ethIfMap):
# raise InvalidProfileData exception if invalid
if self.usesIfName not in allInterfaceNames:
msg = _('vlan interface (%s) uses a undeclared interface (%s)') % \
(self.name, self.usesIfName)
raise InvalidProfileData(msg)
isEthIf = self.isEthInterface(self.usesIfName, ethIfMap)
good = True
if not isEthIf:
ifNameToCheck = [self.usesIfName]
while len(ifNameToCheck) > 0:
ifName = ifNameToCheck.pop(0)
if ifName in aeIfMap:
aeIf = aeIfMap[ifName]
for n in aeIf.usesIf:
ifNameToCheck.append(n)
elif ifName in vlanIfMap:
good = False
break # not good,a vlan in uses tree
if not good:
raise InvalidProfileData(_('A vlan interface cannot use a vlan interface.'))
super(VlanInterface, self).validate()
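# Illustrative examples for the canonical PCI address formatting above (inputs
# are made up; outputs follow formatPciAddress's {04x}:{02x}:{02x}.{01x}
# template):
#
#   EthInterface.formatPciAddress('0:2:1.0')     # -> '0000:02:01.0'
#   EthInterface.formatPciAddress('0000:02:01')  # raises InvalidProfileData
#   EthInterface.formatPciAddress('')            # -> ''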
|
#!/usr/bin/env python3
import argparse
import sys
sys.path.insert(0, "../external_dependencies")
import time
from chirpsdk import ChirpSDK, CallbackSet
class Callbacks(CallbackSet):
def on_sending(self, payload, channel):
""" Called when a chirp has started to be transmitted """
print('Sending: {data} [ch{ch}]'.format(data=list(payload), ch=channel))
def on_sent(self, payload, channel):
""" Called when the entire chirp has been sent """
print('Sent: {data} [ch{ch}]'.format(data=list(payload), ch=channel))
def main(block_name, output_device,
block_size, sample_rate, command):
# Initialise ConnectSDK
sdk = ChirpSDK(block=block_name)
print(str(sdk))
print('Protocol: {protocol} [v{version}]'.format(
protocol=sdk.protocol_name,
version=sdk.protocol_version))
print(sdk.audio.query_devices())
if command is not None:
print("Command is %s" % command)
# Configure audio
sdk.audio.output_device = output_device
sdk.audio.block_size = block_size
sdk.input_sample_rate = sample_rate
sdk.output_sample_rate = sample_rate
# Set callback functions
sdk.set_callbacks(Callbacks())
if command:
message = command.encode('utf-8')
payload = sdk.new_payload(message)
else:
payload = sdk.random_payload()
sdk.start(send=True, receive=False)
sdk.send(payload, blocking=True)
print('Exiting')
sdk.stop()
def finished():
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='ChirpSDK Example',
        epilog='Sends a chirp payload: random by default, or the string passed with -u'
)
parser.add_argument('-c', help='The configuration block [name] in your ~/.chirprc file (optional)')
parser.add_argument('-o', type=int, default=None, help='Output device index (optional)')
parser.add_argument('-b', type=int, default=0, help='Block size (optional)')
parser.add_argument('-s', type=int, default=44100, help='Sample rate (optional)')
parser.add_argument('-u', type=str, default=None, help='Sending String')
args = parser.parse_args()
main(args.c, args.o, args.b, args.s, args.u)
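# Example invocation (the script filename is hypothetical; the flags map to the
# argparse options defined above):
#
#   python3 chirp_send.py -u "hello world" -s 44100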
|
import os
import time
from basic.common import printStatus
from mlengine_const import DEFAULT_BLOCK_SIZE
INFO = __file__
def classify_large_data(model, imset, feat_file, prob_output=False, blocksize=DEFAULT_BLOCK_SIZE):
start = 0
results = []
read_time = 0.0
test_time = 0.0
while start < len(imset):
end = min(len(imset), start + blocksize)
printStatus(INFO, 'classifying images from %d to %d' % (start, end-1))
s_time = time.time()
renamed,vectors = feat_file.read(imset[start:end])
read_time += time.time() - s_time
s_time = time.time()
if prob_output:
scores = [model.predict_probability(vectors[i]) for i in range(len(renamed))]
else:
scores = [model.predict(vectors[i]) for i in range(len(renamed))]
test_time += time.time() - s_time
results += zip(renamed, scores)
start = end
#printStatus('%.sclassifyLargeData'%INFO, 'read time %g seconds, test time %g seconds' % (read_time, test_time))
results.sort(key=lambda v: (v[1], v[0]), reverse=True)
return results
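# Hypothetical usage sketch (the `model`, `imset` and `feat_file` objects are
# assumed to come from the caller's own setup): the only contract assumed here
# is that feat_file.read(ids) returns (renamed_ids, feature_vectors) and the
# model exposes predict()/predict_probability().
#
#   ranked = classify_large_data(model, imset, feat_file, prob_output=True)
#   for image_id, score in ranked[:10]:
#       print(image_id, score)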
|
__author__ = "Bar Harel"
__version__ = "0.1.0"
__license__ = "MIT License"
__all__ = ["RunningTasks"]
from asyncio import ensure_future
from asyncio.futures import isfuture
from contextvars import copy_context, Context
from typing import (
Any, Awaitable, Callable, Iterable,
Iterator, List, MutableSet, Optional,
Set, Tuple, TypeVar, overload)
from asyncio import Task, get_running_loop, Future
_F = TypeVar("_F", bound=Future)
class RunningTasks(MutableSet[Future]):
"""A set of actively running asyncio tasks or futures.
Tasks can be added and will be automatically removed
from this set when done.
Calling wait() will wait until all tasks are done.
Warning: This set may change while iterating over it
if any task is done in the background.
"""
    def __init__(self, it: Optional[Iterable[Future]] = None) -> None:
self._tasks: Set[Future] = set(it or ())
self._waiter: Optional[Future] = None
self._callbacks: List[
Tuple[Callable[[Future], Any], Context]] = []
def add(self, task: Future) -> None:
"""Add a task to the set of active tasks."""
if not isfuture(task):
raise TypeError(
f"task must be a Future object, not {type(task)!r}. "
f"Have you meant {self.__class__.__name__}.create_task()?")
self._tasks.add(task)
task.add_done_callback(self._task_done)
def discard(self, task: Future) -> None:
"""Remove a task from the set of active tasks."""
if not isfuture(task):
raise TypeError("task must be a Future object.")
self._tasks.discard(task)
task.remove_done_callback(self._task_done)
self._wakeup() # Check if there are no more tasks
def add_done_handler(self, fn: Callable[[Future], Any], *,
context: Optional[Context] = None) -> None:
"""Add a callback to be run when EACH of the tasks becomes done.
The callback is called with a single argument - the future object.
Useful for implementing exception handlers or retrieving results.
Args:
fn: The callback function to be called when a task is done.
context: Optionally, the contextvars context to run the
callback in. Defaults to the current context.
"""
if context is None:
context = copy_context()
self._callbacks.append((fn, context))
def remove_done_handler(self, fn: Callable[[Future], Any]) -> int:
"""Remove all instances of a callback from the "call when done" list.
Args:
fn: The callback to remove.
Returns:
The number of callbacks removed.
"""
filtered_callbacks = [(f, ctx)
for (f, ctx) in self._callbacks
if f != fn]
removed_count = len(self._callbacks) - len(filtered_callbacks)
if removed_count:
self._callbacks[:] = filtered_callbacks
return removed_count
def __iter__(self) -> Iterator[Future]:
"""Iterate over all tasks"""
return iter(self._tasks)
def __len__(self) -> int:
"""Return the number of tasks in the set"""
return len(self._tasks)
def __contains__(self, task: Future) -> bool:
"""Check if a task is in the set of active tasks"""
return task in self._tasks
def _wakeup(self):
"""Check if all tasks are done and we can wakeup the waiter"""
if not self._tasks and (waiter := self._waiter):
self._waiter = None
waiter.set_result(None)
def _task_done(self, task: Future) -> None:
"""Called when a task is done.
Removes the task from the set of active tasks
and runs any callbacks
"""
self._tasks.discard(task)
loop = get_running_loop()
for (fn, context) in self._callbacks:
loop.call_soon(fn, task, context=context)
self._wakeup()
@overload
def create_task(self, awaitable: _F) -> _F:
...
@overload
def create_task(self, awaitable: Awaitable) -> Task:
...
def create_task(self, awaitable):
"""Schedule an awaitable to be executed.
Calls ensure_future and adds the task.
Args:
awaitable: The awaitable to be executed.
Returns:
A future representing the execution of the awaitable.
If a task was given, it is returned.
"""
task = ensure_future(awaitable)
self.add(task)
return task
async def wait(self) -> None:
"""Wait for all tasks to finish.
Note: If tasks are added while waiting, in rare cases
the new tasks might not be waited for. This behavior
is intended.
"""
if not self._tasks:
return
if self._waiter is None:
self._waiter = get_running_loop().create_future()
# If a task was added after waiter.set_result() was scheduled we
# will still have pending tasks when wait() returns. This should
# not happen frequently unless a "done_handler" schedules a task
# to run. For simplicity and optimization reasons, that edge case
# is left unchecked.
await self._waiter
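# Illustrative usage sketch (not part of the original module): schedule work
# through create_task(), attach a done handler, then wait() for everything.
#
#   import asyncio
#
#   async def demo():
#       running = RunningTasks()
#       running.add_done_handler(lambda fut: print("done:", fut.result()))
#       for i in range(3):
#           running.create_task(asyncio.sleep(0.1, result=i))
#       await running.wait()
#
#   asyncio.run(demo())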
|
class Solution(object):
def longestOnes(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
# DP, TLE
#
# length = len(A)
# dp = []
# for _ in range(K+1):
# dp.append([0]*length)
# for step in range(K+1):
# for last in range(length):
# if last == 0:
# if step == 0:
# dp[0][0] = A[0]
# else:
# dp[step][0] = 1
# elif step == 0:
# if A[last] == 1:
# dp[0][last] = dp[0][last-1]+1
# else:
# dp[0][last] = 0
# elif step > last+1:
# dp[step][last] = dp[last+1][last]
# else:
# if A[last] == 1:
# dp[step][last] = dp[step][last-1] + 1
# else:
# dp[step][last] = dp[step-1][last-1]+1
# print(dp)
# return max(dp[K])
# Sliding window
lidx = 0
maxl = 0
k_left = K
length = 0
for ridx, rval in enumerate(A):
if rval == 1:
length += 1
maxl = max(maxl, length)
continue
# rval == 0
if k_left > 0:
k_left -= 1
length += 1
maxl = max(maxl, length)
            else:
                # rval is 0 and no flips remain: advance the left edge past the
                # first zero in the window, then recompute the window length.
                while A[lidx] == 1:
                    lidx += 1
                assert A[lidx] == 0
                lidx += 1
                length = ridx - lidx + 1
                maxl = max(maxl, length)
return maxl
s = Solution()
print(s.longestOnes([0,0,0,1], 4))
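# Additional sanity check: the classic example, flipping at most K=2 zeros in
# [1,1,1,0,0,0,1,1,1,1,0] yields a longest run of 6.
print(s.longestOnes([1,1,1,0,0,0,1,1,1,1,0], 2))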
|
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.shortcuts import resolve_url
from django.template.loader import render_to_string
from django.utils import timezone
import linebot
from linebot.models import TextSendMessage
import requests
class Post(models.Model):
    title = models.CharField('Title', max_length=255)
    text = models.TextField('Body')
    created_at = models.DateTimeField('Created at', default=timezone.now)
def __str__(self):
return self.title
def email_push(self, request):
"""記事をメールで通知"""
context = {
'post': self,
}
subject = render_to_string('blog/notify_subject.txt', context, request)
message = render_to_string('blog/notify_message.txt', context, request)
from_email = settings.DEFAULT_FROM_EMAIL
bcc = [settings.DEFAULT_FROM_EMAIL]
for mail_push in EmailPush.objects.filter(is_active=True):
bcc.append(mail_push.email)
email = EmailMessage(subject, message, from_email, [], bcc)
email.send()
def line_push(self, request):
"""記事をラインで通知"""
context = {
'post': self,
}
message = render_to_string('blog/notify_message.txt', context, request)
        line_bot_api = linebot.LineBotApi('your issued access token')
for push in LinePush.objects.all():
line_bot_api.push_message(push.user_id, messages=TextSendMessage(text=message))
def browser_push(self, request):
"""記事をブラウザ通知"""
data = {
            'app_id': 'your APP ID',
'included_segments': ['All'],
'contents': {'en': self.title},
            'headings': {'en': 'Narito Blog'},
'url': resolve_url('blog:detail', pk=self.pk),
}
requests.post(
"https://onesignal.com/api/v1/notifications",
            headers={'Authorization': 'Basic your REST API Key'},
json=data,
)
class EmailPush(models.Model):
"""メールでのプッシュ先を表す"""
email = models.EmailField('メールアドレス', unique=True)
is_active = models.BooleanField('有効フラグ', default=False)
def __str__(self):
return self.email
class LinePush(models.Model):
"""Lineでのプッシュ先を表す"""
user_id = models.CharField('ユーザーID', max_length=100, unique=True)
def __str__(self):
return self.user_id
|
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# Create your models here.
class Imagen(models.Model):
denominacion = models.CharField(max_length=50)
file = models.BinaryField()
content_type_file = models.CharField(max_length=50)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object_padre = GenericForeignKey('content_type','object_id')
lugar = models.CharField(max_length=50, default="None")
|
"""
Django settings for searchblueprints project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0zc1ljd^8@qyq%(@)v9i8)_x=ytwn44^hgg&j0dwo$vvr=grjs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'searchsample',
'whoosh',
'haystack',
'home',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'searchblueprints.urls'
WSGI_APPLICATION = 'searchblueprints.wsgi.application'
WHOOSH_INDEX = os.path.join(BASE_DIR, "whoosh/")
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'searchengine', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': '1234',
'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '',
}
}
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': WHOOSH_INDEX
},
# 'elastic': {
# 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
# 'URL': 'http://127.0.0.1:9200/',
# 'INDEX_NAME': 'haystack',
# },
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(
BASE_DIR,
'static',
),
)
#Templates
TEMPLATE_DIRS = (
'./templates',
)
|
from django.urls import path
from . import views
app_name = 'docs'
urlpatterns = [
path('', views.list_docs, name='index'),
path('<int:doc_id>', views.view_doc, name='view_doc'),
path('<int:doc_id>/authors/', views.view_doc_authors, name='view_doc_authors')
]
|
"""
file: magnet_data.py
author: David Fairbairn
date: September 2016
For looking at magnetometer data from ePOP and computing magnetic field model
predictions.
"""
from ..utils import data_utils
import analysis_tools
import h5py
import matplotlib.pyplot as plt
import numpy as np
sample_mgf_datafile = "data/mgf/MGF_20160418_222505_224033_V_01_00_00.1sps.GEI.lv3"
def read_mgf_file(fname):
# if doesn't exist, exit with error
# else try to read in the columns as we expect them
#f = FileLineWrapper(open(fname,'r'))
# FileLineWrapper functionality (seeking/returning line #'s) isn't really
# necessary for reading in file data once - so I don't bother using it
ephtimes = []
Bscx = []
Bscy = []
Bscz = []
f = open(fname,'r')
for ln in f:
spl = ln.split()
if spl[0]=='LV3_DESCRPT':
continue
## TODO: Make this check work for 5 decimal doubles (alt to isdigit()?)
#if not spl[1].isdigit():
# continue
# No need to retrieve other parts of data
ephtimes.append(float(spl[1])) # Ephem Times
Bscx.append(float(spl[2])) # B_SCx
Bscy.append(float(spl[3])) # B_SCy
Bscz.append(float(spl[4])) # B_SCz
return (Bscx, Bscy, Bscz, ephtimes)
def satframe2ned(sc_vecs,ram_dirs,yaws,pitchs,rolls):
"""
This function takes coordinates expressed in terms of directions with
respect to the satellite body. The x,y,z coordinates would normally
correspond to regular spacecraft coordinates, but in this case the body
of the spacecraft is assumed to be oriented away from its ram direction,
which is corrected for.
This correction is achieved by accounting for the craft's yaw, pitch, roll
values at each ephemeris point.
Note: yaw, pitch, roll are angles of rotation around three 'SC' coordinate
axes. 'X' is in the ram direction, 'Z' is the axis from the craft body to the
Earth's centre, and 'Y' is 'Z' cross 'X'.
The assumption in this function is that the coordinates in sc_vecs are
not actually yet in proper SC/('X','Y','Z') components yet - that they are
tilted around with the satellite body.
*** PARAMS ***
sc_vecs: the coords in terms of the spacecraft body (x=ram, z=nadir)
ram_dirs: the ram direction in N-E-D components
yaws: rotation around spacecraft Z axis in degrees
pitchs: rotation around spacecraft Y axis in degrees
rolls: rotation around spacecraft X axis in degrees
*** RETURNS ***
outs: output vector (should be in same units as input)
**!!Standards Dilemma!!**
Word of Gareth is that x is ram dir, z is nadir dir, and y is Z cross X.
But then if satellite has vertical component (which it does, though its
small), this isn't a right handed coord system. Possible solutions are:
- ignore it and hope the error is small (should be)
- define x to just be ram direction in North and East directions (not down)
Initially, I proceed with solution approach #1 for simplicity
"""
# Step 1. Compute Spacecraft X Y Z directions from the ram direction
xdirs = [ram_dirs[i]/np.linalg.norm(ram_dirs[i]) for i in range(len(ram_dirs))]
print "First normalized ram dir (x dir): "+str(xdirs[0]) #reminder to self: check if its already normalized
zdirs = np.array([(0.,0.,1.) for i in range(xdirs.__len__())]) #Just down
ydirs = np.cross(zdirs,xdirs)
# Step 2. Reverse rotate by amounts described in roll, pitch, yaw
outs = []
for i in range(xdirs.__len__()):
roll_rot = analysis_tools.rotation_matrix(xdirs[i],np.deg2rad(-rolls[i]))
pitch_rot = analysis_tools.rotation_matrix(ydirs[i], np.deg2rad(-pitchs[i]))
yaw_rot = analysis_tools.rotation_matrix(zdirs[i], np.deg2rad(-yaws[i]))
#print "Roll rotation matrix magnitude: ",np.linalg.norm(roll_rot)
#print "Pitch rotation matrix magnitude: ",np.linalg.norm(pitch_rot)
#print "Yaw rotation matrix magnitude: ",np.linalg.norm(yaw_rot)
"""
intermed1 = np.dot(roll_rot, sc_vecs[i])
intermed2 = np.dot(pitch_rot, intermed1)
intermed3 = np.dot(yaw_rot, intermed2)
"""
intermed3 = sc_vecs[i] # Temporarily looking at what happens if I don't bother accounting for yaw/roll/pitch
A = np.array((xdirs[i],ydirs[i],zdirs[i]))
Ainv = np.linalg.inv(A)
#print "Spatial conversion matrix: ",A
#print "Spatial conversion matrix magnitude: ",np.linalg.norm(A)
#exit_rri()
out = np.dot(intermed3, Ainv)
outs.append(out)
return np.array(outs)
def sc2ned(sc_vecs,ram_dirs):
"""
Takes some space-craft based coordinates and the spacecraft's ram direction
in North-East-Down components and converts another set of vectors written
in terms of spacecraft coordinates, and converts those to NED.
*** PARAMS ***
sc_vecs: the coords in terms of the spacecraft frame (x=ram, z=nadir)
ram_dirs: the ram direction in N-E-D components
*** RETURNS ***
outs: output vector (should be in same units as input)
**!!Standards Dilemma!!**
Word of Gareth is that x is ram dir, z is nadir dir, and y is Z cross X.
But then if satellite has vertical component (which it does, though its
small), this isn't a right handed coord system. Possible solutions are:
- ignore it and hope the error is small (should be)
- define x to just be ram direction in North and East directions (not down)
Initially, I proceed with solution approach #1 for simplicity
"""
# Step 1. Compute Spacecraft X Y Z directions from the ram direction
xdirs = [ram_dirs[i]/np.linalg.norm(ram_dirs[i]) for i in range(len(ram_dirs))]
print "First normalized ram dir (x dir): ",xdirs[0] #reminder to self: check if its already normalized
zdirs = np.array([(0.,0.,1.) for i in range(xdirs.__len__())]) #Just down
ydirs = np.cross(zdirs,xdirs)
outs = []
for i in range(xdirs.__len__()):
intermed = sc_vecs[i] # Temporarily looking at what happens if I don't bother accounting for yaw/roll/pitch
A = np.array((xdirs[i],ydirs[i],zdirs[i]))
Ainv = np.linalg.inv(A)
#print "Spatial conversion matrix: ",A
#print "Spatial conversion matrix magnitude: ",np.linalg.norm(A)
#exit_rri()
out = np.dot(intermed, Ainv)
outs.append(out)
return np.array(outs)
def ned2sc(ned_vecs,ram_dirs):
"""
Also need a way to compute SC coords from NED coords. This function is the
    inverse of sc2ned().
"""
# Step 1. Compute Spacecraft X Y Z directions from the ram direction
xdirs = [ram_dirs[i]/np.linalg.norm(ram_dirs[i]) for i in range(len(ram_dirs))]
print "First normalized ram dir (x dir): ",xdirs[0] #reminder to self: check if its already normalized
zdirs = np.array([(0.,0.,1.) for i in range(xdirs.__len__())]) #Just down
ydirs = np.cross(zdirs,xdirs)
outs = []
for i in range(xdirs.__len__()):
intermed = ned_vecs[i]
A = np.array((xdirs[i],ydirs[i],zdirs[i]))
#print "Spatial conversion matrix: ",A
#print "Spatial conversion matrix magnitude: ",np.linalg.norm(A)
#exit_rri()
out = np.dot(A,intermed)
outs.append(out)
return np.array(outs)
def get_igrf(lons,lats,alts,ephtimes):
"""
Highly visible getter method for acquiring Earth's magnetic field values
for the provided ndarray of longitudes/latitudes/altitudes/times. Uses the
IGRF model
"""
from davitpy.models import igrf
from davitpy import utils
itype = 1 #Geodetic coordinates
stp = 1.
ifl = 0
times = data_utils.ephems_to_datetime(ephtimes)
B_igrf = np.zeros((len(times),3))
for i, time in enumerate(times):
date = data_utils.dateToDecYear(time)
lon = lons[i]
lat = lats[i]
alt = alts[i]
xlti, xltf, xltd = lat, lat, stp
xlni, xlnf, xlnd = lon, lon, stp
# Call fortran subroutine
lat,lon,d,s,h,bx,by,bz,f = igrf.igrf11(itype,date,alt,ifl,xlti,xltf,xltd,xlni,xlnf,xlnd)
B_igrf[i,:] = np.array((bx[0],by[0],bz[0]))
return np.array(B_igrf)
def cmp_igrf_magnetometer(fname=sample_mgf_datafile, date_string="20160418"):
"""
I use this to compare the IGRF model to the recorded magnetic field by the
MGF instrument onboard ePOP.
"""
# Set this to correspond to the mgf file at the top until mgf file selection is possible
datpath,datname = data_utils.initialize_data()
rrifname,index_reversal = data_utils.get_ottawa_data(date_string)
# Get RRI ephemeris data together so that we can remove effect of spacecraft direction
lons,lats,alts,ephtimes,mlons,mlats,mlts,pitchs,yaws,rolls = data_utils.get_rri_ephemeris_full(rrifname)
ephtimes = np.array([ round(e) for e in ephtimes]) # crucial for comparing mgf and rri times
vs,dists = analysis_tools.get_ramdirs(lons, lats, alts)
# Calculate IGRF at each ephemeris point
# import os
# import sys
# Redirect stdout so that IGRF can't print to the screen and spam me
# sys.stdout = open(os.devnull, "w") # turns out it doesnt work because IGRF stuff is in fortran/handled separately
B_igrf,kvecs,angles = analysis_tools.get_kb_angle(lons,lats,alts,ephtimes)
# sys.stdout = sys.__stdout__ # Reconnect stdout
# TMP change! Leaving both arrays of B field measurements unchanged and playing with spacepy on them
#B_igrf = ned2sc(np.array(B_igrf),vs) #Converting from NED to SC!
B_igrf = np.array(B_igrf)
# Acquire the MGF data
bscx, bscy, bscz, ephtimes_bsc = data_utils.read_mgf_file(fname)
B_mgf_intermediate = [ (bscx[i],bscy[i],bscz[i]) for i in range(len(bscx))]
print "Intermediate B_mgf first entry:\n",B_mgf_intermediate[0]
print "Magnitude: ",np.linalg.norm(B_mgf_intermediate[0])
# Need to compare the time of the different data sets
times_rri = data_utils.ephems_to_datetime(ephtimes)
times_mgf = data_utils.ephems_to_datetime(np.array(ephtimes_bsc))
print "times_mgf's first few entries look like:\n",times_mgf[0:3]
print "times_rri's first few entries look like:\n",times_rri[0:3]
print "Length of mgf times:\n",len(ephtimes_bsc)
print "Length of rri times:\n",len(ephtimes)
try:
i_rristart = times_mgf.index(times_rri[0])
print "Index of mgf data at which point rri starts taking data:\t",i_rristart
print "times_mgf_iso[i_rristart]:\t",times_mgf[i_rristart]
print "times_rri_iso[0]:\t",times_rri[0]
except ValueError:
print "Failed to find where RRI starts in MGF data."
        print times_rri
B_mgf = np.array(B_mgf_intermediate)
return B_mgf,B_igrf
def plot_comparison(fname=sample_mgf_datafile, date_string="20160418"):
"""
Function to plot the components of MGF and IGRF B fields against each other.
Note: Currently the IGRF might not be correctly converted so the components
won't be similar to each other at all.
"""
rri_fname,index_reversal = data_utils.get_ottawa_data(date_string)
lons,lats,alts,ephtimes,mlons,mlats,mlts,pitchs,yaws,rolls = data_utils.get_rri_ephemeris_full(rri_fname)
B_mgf,B_igrf = cmp_igrf_magnetometer(fname,date_string)
plt.plot(B_mgf[:,0],'b',label="MGF SC_X Component")
plt.plot(B_igrf[:,0],'r',label="IGRF SC_X Component")
plt.legend()
plt.title("Comparison of B Component in SC_X Direction")
#ephem_ticks(lons,lats,alts,ephtimes,mlons,mlats,mlts)
plt.show()
plt.plot(B_mgf[:,1],'b',label="MGF SC_Y Component")
plt.plot(B_igrf[:,1],'r',label="IGRF SC_Y Component")
plt.legend()
plt.title("Comparison of B Component in SC_Y Direction")
#ephem_ticks(lons,lats,alts,ephtimes,mlons,mlats,mlts)
plt.show()
plt.plot(B_mgf[:,2],'b',label="MGF SC_Z Component")
plt.plot(B_igrf[:,2],'r',label="IGRF SC_Z Component")
plt.legend()
plt.title("Comparison of B Component in SC_Z Direction")
#ephem_ticks(lons,lats,alts,ephtimes,mlons,mlats,mlts)
plt.show()
if __name__ == "__main__":
B_mgf,B_igrf = cmp_igrf_magnetometer()
print "MGF B field magnitude: ",np.linalg.norm(B_mgf[0]),B_mgf[0]
print "IGRF B field magnitude: ",np.linalg.norm(B_igrf[0]),B_igrf[0]
#plot_comparison()
"""
bscx, bscy, bscz, ephtimes_bsc = data_utils.read_mgf_file(sample_mgf_datafile)
B_mgf = [ (bscx[i],bscy[i],bscz[i]) for i in range(len(bscx))]
#print B_mgf
date_string = "20160418"
fname, index_reversal = data_utils.get_ottawa_data(date_string)
lons,lats,alts,ephtimes,mlons,mlats,mlts,pitchs,yaws,rolls = data_utils.get_rri_ephemeris_full(fname)
vs,dists = analysis_tools.get_ramdirs(lons,lats,alts,ephtimes)
print B_mgf[0]
out = sc2ned2(B_mgf,vs,yaws,pitchs,rolls)
"""
|
from fastapi import Depends, status
from fastapi import APIRouter
from fastapi.security import HTTPBasic
import os
from dotenv import load_dotenv
load_dotenv()
from ..auth.authentication import authenticate_admin, authenticate_webuser
security = HTTPBasic()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# User main server route listener
user_router = APIRouter(
prefix = os.getenv("API_URL") + '/users',
tags=['users']
)
@user_router.get("/admin", status_code = status.HTTP_200_OK)
async def read_current_user(administrator: str = Depends(authenticate_admin)):
"""
    Return the authenticated administrator (database manager) username.
"""
return {"administrator": administrator}
@user_router.get("/me", status_code = status.HTTP_200_OK)
async def read_current_webuser(username: str = Depends(authenticate_webuser)):
"""
    Return the authenticated webservice API username.
"""
return {"username": username}
|
from collections import OrderedDict
from typing import Any, Callable, Iterable, List
from typing import MutableMapping as MutableMappingType
from typing import Optional, Type, TYPE_CHECKING, TypeVar
from valohai_yaml.types import SerializedDict
if TYPE_CHECKING:
from valohai_yaml.objs.base import Item
TItem = TypeVar('TItem', bound='Item')
T = TypeVar('T')
def consume_array_of(source: SerializedDict, key: str, type: Type[TItem]) -> List[TItem]:
return [type.parse(datum) for datum in source.pop(key, ())]
def check_type_and_listify(
source: Optional[Iterable[Any]],
type: Type[T],
parse: Optional[Callable[[Any], T]] = None,
) -> List[T]:
"""
Check that all items in the `source` iterable are of the type `type`, return a list.
If `parse` is given, and the item is not of the type, that function is called to parse it to one.
"""
if source is None:
return []
out = []
for item in source:
if not isinstance(item, type):
if not parse:
raise TypeError(f"{item} not a {type}")
item = parse(item)
assert isinstance(item, type) # Make sure `parse` was up to spec
out.append(item)
return out
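# Illustrative sketch (hypothetical values, not taken from valohai-yaml itself):
# items already of the requested type pass through, other items go through `parse`.
#   check_type_and_listify(["1", 2, "3"], int, parse=int)  # -> [1, 2, 3]
#   check_type_and_listify(None, int)                       # -> []
#   check_type_and_listify(["x"], int)                      # -> TypeError (no parse given)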
# TODO: use `typing.OrderedDict` as return type when only 3.7.2+ supported
def check_type_and_dictify(source: Optional[Iterable[Any]], type: Type[T], attr: str) -> MutableMappingType[str, T]:
"""Check that all items in the `source` iterable are of the type `type` and map them into an OrderedDict."""
out = OrderedDict() # type: OrderedDict[str, T]
if source is None:
return out
for item in source:
if not isinstance(item, type):
raise TypeError(f"{item} not a {type}")
out[getattr(item, attr)] = item
return out
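# Illustrative sketch (hypothetical values): items are keyed by the given attribute,
# so later items with the same attribute value overwrite earlier ones.
#   class Step: ...  # assumed to have a `name` attribute
#   check_type_and_dictify([step_a, step_b], Step, attr='name')
#   # -> OrderedDict([('a', step_a), ('b', step_b)])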
def serialize_into( # noqa: ANN301
dest, # type: OrderedDict[str, Any]
key: str,
value: Any,
*,
flatten_dicts: bool = False,
elide_empty_iterables: bool = False
) -> None:
if value is None:
return
if flatten_dicts and isinstance(value, dict):
value = list(value.values())
if isinstance(value, (tuple, list)): # can't use collections.Collection :(
if elide_empty_iterables and not value:
return
value = [_serialize_if_able(item) for item in value]
else:
value = _serialize_if_able(value)
dest[key] = value
def _serialize_if_able(v: Any) -> Any:
return (v.serialize() if hasattr(v, 'serialize') else v)
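# Illustrative sketch (hypothetical values): `serialize_into` writes `value` into
# `dest` under `key`, calling `.serialize()` on anything that supports it.
#   dest = OrderedDict()
#   serialize_into(dest, 'parameters', {'a': param_a, 'b': param_b}, flatten_dicts=True)
#   # -> dest['parameters'] == [param_a.serialize(), param_b.serialize()]
#   serialize_into(dest, 'tags', [], elide_empty_iterables=True)   # key omitted entirely
#   serialize_into(dest, 'name', None)                             # also omitted (None is skipped)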
|
import time
import os
import pickle
import numpy as np
from mpi4py import MPI
from src import logger
from src.utils.util import mpi_average
class StatsLogger:
def __init__(self, training_worker, evaluation_worker, policy, reward_function, goal_sampler, data_processor, params):
self.training_worker = training_worker
self.evaluation_worker = evaluation_worker
self.policy = policy
self.reward_function = reward_function
self.goal_sampler = goal_sampler
self.data_processor = data_processor
self.best_success_rate = -1e6
self.first_time = time.time()
self.last_time = time.time()
self.params = params
self.rank = MPI.COMM_WORLD.Get_rank()
self.logdir = self.params['experiment_params']['logdir']
self.nb_goals = len(params['train_descriptions'])
if self.rank == 0:
self.latest_policy_path = os.path.join(logger.get_dir(), 'policy_checkpoints/policy_latest.pkl')
self.best_policy_path = os.path.join(logger.get_dir(), 'policy_checkpoints/policy_best.pkl')
self.periodic_policy_path = os.path.join(logger.get_dir(), 'policy_checkpoints/policy_{}.pkl')
def compute_reward_function_metrics(self, epoch, episode_count):
# Compute and log reward function metrics
if self.rank == 0:
if self.params['conditions']['reward_function'] in ['learned_randomforest', 'pretrained_lstm', 'learned_lstm'] :
save_header = False
if epoch == 0:
save_header = True
if len(self.reward_function.recent_metrics_record) > 0:
with open(os.path.join(self.logdir, 'reward_func_metrics.csv'), 'a') as f:
df = self.reward_function.recent_metrics_record[-1].reset_index()
df['epoch'] = epoch
logger.info(df)
df.to_csv(f, header=save_header, index=False)
# Save stats confusion matrix
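                # Assumed interpretation (not stated in the original): stats_confusion[i][j]
                # holds recent reward-function outcomes for goal i with true label j; each
                # row of the 2x2 matrix is filled with (1 - mean, mean) of those outcomes,
                # and defaults to (0.5, 0.5) whenever fewer than 20 samples are available.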
stats_confusion_rew_func = [np.zeros([2, 2]) for _ in range(len(self.params['train_descriptions']))]
stats_confusion = self.data_processor.stats_confusion_rew_func
for i in range(len(self.params['train_descriptions'])):
if len(stats_confusion[i][0]) < 20:
stats_confusion_rew_func[i][0, 0] = 0.5
stats_confusion_rew_func[i][0, 1] = 0.5
else:
stats_confusion_rew_func[i][0, 0] = 1 - np.mean(stats_confusion[i][0])
stats_confusion_rew_func[i][0, 1] = np.mean(stats_confusion[i][0])
if len(stats_confusion[i][1]) < 20:
stats_confusion_rew_func[i][1, 0] = 0.5
stats_confusion_rew_func[i][1, 1] = 0.5
else:
stats_confusion_rew_func[i][1, 0] = 1 - np.mean(
stats_confusion[i][1])
stats_confusion_rew_func[i][1, 1] = np.mean(stats_confusion[i][1])
for j in range(2):
for k in range(2):
if np.isnan(stats_confusion_rew_func[i][j, k]):
stats_confusion_rew_func[i][j, k] = 0.5
with open(self.logdir + 'goal_info/stats_confusion_rew_func_' + str(episode_count) + '.pk', 'wb') as f:
pickle.dump(stats_confusion_rew_func, f)
def compute_metrics(self, epoch, episode_count, eval_success_rate, time_logs):
self.compute_reward_function_metrics(epoch, episode_count)
# Save observations
if self.params['experiment_params']['save_obs'] and self.rank == 0:
with open(self.logdir + 'obs_' + str(epoch) + '.pk', 'wb') as f:
pickle.dump(self.data_processor.states_to_save, f)
self.data_processor.clear_memory_states_to_save()
with open(self.logdir + 'goal_discovery_' + str(epoch) + '.pk', 'wb') as f:
pickle.dump(self.data_processor.goal_sampler.feedback_memory['iter_discovery'], f)
self.dump_goal_metrics(episode_count)
logs = self.compute_and_log_interaction_metrics(episode_count, eval_success_rate)
# record logs
for key, val in self.policy.logs():
logger.record_tabular(key, mpi_average(val))
logger.info(len(logs))
for key, val in logs:
logger.record_tabular(key, mpi_average(val))
logger.record_tabular('pos_rew_ratio', mpi_average(self.policy.get_replay_ratio_positive_reward_stat()))
if self.rank == 0:
logger.record_tabular('total_duration (s)', time.time() - self.first_time)
logger.record_tabular('epoch_duration (s)', time.time() - self.last_time)
self.last_time = time.time()
logger.record_tabular('epoch', epoch)
for key, value in time_logs.time_stats.items():
logger.record_tabular(key, "{:.3f}".format(value))
logger.dump_tabular()
success_rate = mpi_average(eval_success_rate)
if self.rank == 0:
# Save the policy if it's better than the previous ones
self.evaluation_worker.save_policy(self.latest_policy_path)
if self.params['conditions']['reward_function'] != 'oracle':
self.reward_function.save_checkpoint(self.params['experiment_params']['logdir'] + 'reward_checkpoints/reward_func_latest_checkpoint')
if success_rate >= self.best_success_rate:
self.best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(self.best_success_rate, self.best_policy_path))
self.evaluation_worker.save_policy(self.best_policy_path)
if self.params['conditions']['reward_function'] != 'oracle':
self.reward_function.save_checkpoint(self.params['experiment_params']['logdir'] + 'reward_checkpoints/reward_func_best_checkpoint')
# Save policy periodically
if epoch % self.params['experiment_params']['policy_save_interval'] == 0:
policy_path = self.periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
self.evaluation_worker.save_policy(policy_path)
if self.params['conditions']['reward_function'] != 'oracle':
self.reward_function.save_checkpoint(self.params['experiment_params']['logdir'] + 'reward_checkpoints/reward_func_checkpoint_{}'.format(str(epoch)))
def dump_goal_metrics(self, episode_count):
if self.rank == 0:
info = dict(discovered_goals=self.goal_sampler.feedback_memory['string'],
replay_proba=self.policy.replay_proba,
exploration_metrics=self.data_processor.exploration_tracker.metrics.copy()
)
with open(self.logdir+ 'goal_info/info_' + str(episode_count) + '.pk', 'wb') as f:
pickle.dump(info, f)
self.data_processor.exploration_tracker.reset_metrics()
def compute_and_log_interaction_metrics(self, episode_count, eval_success_rate):
logs = []
prefix = 'eval'
logs += [('z_current_eval_success_rate', eval_success_rate)]
logs += [('episode', episode_count)]
for i in range(self.nb_goals):
if len(self.data_processor.evaluation_return_histories[i]) > 0:
mean = np.mean(self.data_processor.evaluation_return_histories[i])
else:
mean = 0
logs += [(prefix + '/success_goal_' + str(i), mean)]
logs += [(prefix + '/mean_Q', np.mean(self.evaluation_worker.Q_history))]
return logs
|
# Generated by Django 3.2.3 on 2021-05-29 20:46
from django.apps.registry import Apps
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
def migrate_to_username(apps: Apps, schema_editor: BaseDatabaseSchemaEditor):
db_alias = schema_editor.connection.alias
UserReputation = apps.get_model("authentik_policies_reputation", "userreputation")
for rep in UserReputation.objects.using(db_alias).all():
rep.username = rep.user.username
rep.save()
class Migration(migrations.Migration):
dependencies = [
("authentik_policies_reputation", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="userreputation",
name="username",
field=models.TextField(default=""),
preserve_default=False,
),
migrations.RunPython(migrate_to_username),
migrations.RemoveField(
model_name="userreputation",
name="user",
),
migrations.AlterField(
model_name="userreputation",
name="username",
field=models.TextField(),
),
]
|
from scdown.neo import (Neo,
NODE_USER,
NODE_TRACK,
NODE_COMMENT,
NODE_PROFILE,
REL_FOLLOWS,
REL_UPLOADED,
REL_FAVORITED,
REL_HAS_PROFILE,
REL_WROTE,
REL_REFERS_TO)
from py2neo import Graph
from itertools import product
from nose.tools import with_setup
TEST_DB = "http://127.0.0.1:8585/db/data/"
graph = Graph(TEST_DB)
neo = Neo(graph)
def setup_func():
pass
def teardown_func():
graph.delete_all()
datum = {"id": 1, "name": "Me"}
datum2 = dict(datum)
nested = {"new": {"data": True, "deeply": "nested"}}
datum2["novum"] = nested
def test_deflate():
flat = neo.deflate(datum2)
# adds keys due to nesting
assert len(flat) == len(datum2) + 1
# idempotent
assert flat == neo.deflate(flat)
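# Illustrative expectation (inferred from the nested-properties test further below):
# deflate() keeps flat keys as-is and stores nested mappings under a "__json_"-prefixed
# companion key, so datum2 gains exactly one extra key for "novum"; inflate() reverses
# this, which is why deflate(deflate(x)) == deflate(x).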
@with_setup(setup_func, teardown_func)
def test_create_or_update_node():
datum = {"id": 1, "name": "Me"}
datum1 = dict(datum)
datum1["color"] = "red"
node = neo.create_or_update_node(NODE_USER, datum)
assert node.exists
assert NODE_USER in node.labels
node2 = neo.create_or_update_node(NODE_USER, datum1)
assert node.ref == node2.ref
assert node.properties == datum1
@with_setup(setup_func, teardown_func)
def test_node_types():
nodes = set()
for n in [NODE_USER, NODE_COMMENT, NODE_PROFILE, NODE_TRACK]:
node = neo.create_or_update_node(n, datum)
nodes.add(node)
assert len(nodes) == 4
@with_setup(setup_func, teardown_func)
def test_relation_types():
nodes = {}
acceptable = set(
[(NODE_USER, REL_HAS_PROFILE, NODE_PROFILE),
(NODE_USER, REL_FOLLOWS, NODE_USER),
(NODE_USER, REL_UPLOADED, NODE_TRACK),
(NODE_USER, REL_FAVORITED, NODE_TRACK),
(NODE_USER, REL_WROTE, NODE_COMMENT),
(NODE_COMMENT, REL_REFERS_TO, NODE_TRACK)])
accepted = set()
rel_types = [REL_FOLLOWS,
REL_UPLOADED,
REL_FAVORITED,
REL_HAS_PROFILE,
REL_WROTE,
REL_REFERS_TO]
for n in [NODE_USER, NODE_COMMENT, NODE_PROFILE, NODE_TRACK]:
node = neo.create_or_update_node(n, datum)
nodes[n] = node
combos = product(nodes.items(), repeat=2)
for c1, c2 in (tuple(prod) for prod in combos):
k1, n1 = c1
k2, n2 = c2
for r in rel_types:
try:
neo.mk_relation(n1, r, n2)
accepted.add((k1, r, k2))
except AssertionError:
pass
assert acceptable == accepted
@with_setup(setup_func, teardown_func)
def test_nested_properties():
node = neo.create_or_update_node(NODE_COMMENT, datum2)
assert node.exists
assert "novum" in node.properties
assert node.properties["__json_novum"]
assert neo.inflate(node.properties) == datum2
|
# coding: utf-8
"""
certascale API
Certascale API documentation # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.domain_api import DomainApi # noqa: E501
from swagger_client.rest import ApiException
class TestDomainApi(unittest.TestCase):
"""DomainApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.domain_api.DomainApi() # noqa: E501
def tearDown(self):
pass
def test_certificate_create(self):
"""Test case for certificate_create
"""
pass
def test_certificate_delete(self):
"""Test case for certificate_delete
"""
pass
def test_certificate_get(self):
"""Test case for certificate_get
"""
pass
def test_certificate_list(self):
"""Test case for certificate_list
"""
pass
def test_domain_create(self):
"""Test case for domain_create
"""
pass
def test_domain_delete(self):
"""Test case for domain_delete
"""
pass
def test_domain_get(self):
"""Test case for domain_get
"""
pass
def test_domain_list(self):
"""Test case for domain_list
"""
pass
if __name__ == '__main__':
unittest.main()
|
def read_ESPRESSO_S2D(inpath,outname,air=True,nowave=False,molecfit=True,mode='HARPS'):
"""THIS IS A PYTHON TRANSLATION OF READ_DATA (BELOW). IT SHOULD NOT WORK
WITH A PATHS FILE, JUST A FOLDER THAT CONTAINS ONLY FITS FILES AND THEN
IT WORKS FROM THE KEYWORDS TO DO EVERYTHING AUTOMATICALLY.
WRITE GOOD TESTS AND DOCUMENTATION.
ALSO, ULTIMATELY THIS WILL NEED A WRAPPER THAT CAN SWITCH BETWEEN DIFFERENT STANDARD DATASETS.
IN THE CASE OF UVES (AND MAYBE MOST OTHER DATASETS) IT WILL NEED TO DEAL WITH BERV CORRECTIONS.
GREAT WAY TO DO THIS IS HERE: https://docs.astropy.org/en/stable/coordinates/velocities.html
DID THAT WITH JEHAN FOR 55 CNC E.
Set the nowave keyword to True if the dataset has no wave files associated with it.
This may happen if you downloaded ESO Advanced Data Products, which include
reduced science e2ds's but not reduced wave e2ds's. The wavelength solution
is still encoded in the fits header however, so we take it from there, instead.
IF IN THE FUTURE A BERV KEYWORD WOULD BE MISSING, I HAVE INCLUDED AN ASTROPY
IMPLEMENTATION THAT ACCURATELY CALCULATES THE BERV FROM THE MJD. SEE SYSTEM_PARAMETERS.PY
"""
import os
import pdb
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import sys
import lib.utils as ut
import lib.molecfit as mol
import pyfits
import copy
import scipy.interpolate as interp
import pickle
import lib.constants as const
#First check the input:
ut.typetest('inpath in read_ESPRESSO_S2D ',inpath,str)
ut.typetest('outname in read_ESPRESSO_S2D ',outname,str)
ut.typetest('air in read_ESPRESSO_S2D ',air,bool)
if os.path.exists(inpath) != True:
print("ERROR in read_ESPRESSO_S2D: Data input path (%s) does not exist." % inpath)
sys.exit()
filelist=os.listdir(inpath)
N=len(filelist)
if len(filelist) == 0:
print("ERROR in read_ESPRESSO_S2D: input folder (%s) is empty." % inpath)
sys.exit()
#The following variables define the lists in which all the necessary data will be stored.
framename=[]
header=[]
s1dhdr=[]
type=[]
texp=np.array([])
date=[]
mjd=np.array([])
ccfmjd=np.array([])
s1dmjd=np.array([])
npx=np.array([])
nrv=np.array([])
norders=np.array([])
e2ds=[]
s1d=[]
wave1d=[]
airmass=np.array([])
berv=np.array([])
wave=[]
blaze=[]
ccfs=[]
wavefile_used = []
outpath = ut.path('data/'+outname)
if os.path.exists(outpath) != True:
os.makedirs(outpath)
#ccftotal = 0 #This will hold the sum of the CCFs
e2ds_count = 0
sci_count = 0
wave_count = 0
ccf_count = 0
blaze_count = 0
s1d_count = 0
#MODE SWITCHING HERE:
catkeyword = 'EXTNAME'
bervkeyword = 'HIERARCH ESO QC BERV'
airmass_keyword1 = 'HIERARCH ESO TEL'
airmass_keyword2 = ' AIRM '
airmass_keyword3_start = 'START'
airmass_keyword3_end = 'END'
# berv=np.append(berv,hdr1['HIERARCH ESO QC BERV'])
# airmass=np.append(airmass,0.5*(hdr1['HIERARCH ESO TEL3 AIRM START']+hdr1['HIERARCH ESO TEL3 AIRM END']))
for i in range(N):
if filelist[i].endswith('S2D_A.fits'):
e2ds_count += 1
print(filelist[i])
#data,hdr=fits.getdata(inpath+filelist[i],header=True)
hdul = fits.open(inpath+filelist[i])
data = copy.deepcopy(hdul[1].data)
hdr = hdul[0].header
hdr2 = hdul[1].header
wavedata=copy.deepcopy(hdul[5].data)
hdul.close()
del hdul[1].data
if hdr2[catkeyword] == 'SCIDATA':
print('science keyword found')
framename.append(filelist[i])
header.append(hdr)
type.append('SCIENCE')
texp=np.append(texp,hdr['EXPTIME'])
date.append(hdr['DATE-OBS'])
mjd=np.append(mjd,hdr['MJD-OBS'])
npx=np.append(npx,hdr2['NAXIS1'])
norders=np.append(norders,hdr2['NAXIS2'])
e2ds.append(data)
sci_count += 1
berv=np.append(berv,hdr[bervkeyword]*1000.0)
telescope = hdr['TELESCOP'][-1]
airmass = np.append(airmass,0.5*(hdr[airmass_keyword1+telescope+' AIRM START']+hdr[airmass_keyword1+telescope+' AIRM END']))
wave.append(wavedata*(1.0-hdr[bervkeyword]*1000.0/const.c))
#Ok.! So unlike HARPS, ESPRESSO wavelengths are BERV corrected in the S2Ds.
#WHY!!!?. WELL SO BE IT. IN ORDER TO HAVE E2DSes THAT ARE ON THE SAME GRID, AS REQUIRED, WE UNDO THE BERV CORRECTION HERE.
#WHEN COMPARING WAVE[0] WITH WAVE[1], YOU SHOULD SEE THAT THE DIFFERENCE IS NILL.
#THATS WHY LATER WE JUST USE WAVE[0] AS THE REPRESENTATIVE GRID FOR ALL.
if filelist[i].endswith('CCF_A.fits'):
#ccf,hdr=fits.getdata(inpath+filelist[i],header=True)
hdul = fits.open(inpath+filelist[i])
ccf = copy.deepcopy(hdul[1].data)
hdr = hdul[0].header
hdr2 = hdul[1].header
hdul.close()
del hdul[1].data
if hdr2[catkeyword] == 'SCIDATA':
print('CCF ADDED')
#ccftotal+=ccf
ccfs.append(ccf)
ccfmjd=np.append(ccfmjd,hdr['MJD-OBS'])
nrv=np.append(nrv,hdr2['NAXIS1'])
ccf_count += 1
if filelist[i].endswith('S1D_A.fits'):
hdul = fits.open(inpath+filelist[i])
data_table = copy.deepcopy(hdul[1].data)
hdr = hdul[0].header
hdr2 = hdul[1].header
hdul.close()
del hdul[1].data
if hdr['HIERARCH ESO PRO SCIENCE'] == True:
s1d.append(data_table.field(2))
wave1d.append(data_table.field(1))
s1dhdr.append(hdr)
s1dmjd=np.append(s1dmjd,hdr['MJD-OBS'])
s1d_count += 1
#Now we catch some errors:
#-The above should have read a certain number of e2ds files.
#-A certain number of these should be SCIENCE frames.
#-There should be at least one WAVE file.
#-All exposures should have the same number of spectral orders.
#-All orders should have the same number of pixels (this is true for HARPS).
#-The wave frame should have the same dimensions as the order frames.
#-If nowave is set, test that all frames used the same wave_A calibrator.
#-The blaze file needs to have the same shape as the e2ds files.
#-The number of s1d files should be the same as the number of e2ds files.
if ccf_count != sci_count:
print("ERROR in read_ESPRESSO_S2D: There is a different number of science CCFs as there is science frames.")
sys.exit()
# if e2ds_count != s1d_count:
# print('ERROR in read_HARPS_e2ds: The numbers of 1ds and e2ds files are different.')
# print("These are the files and their types:")
# for i in range(len(type)):
# print(' '+framename[i]+' %s' % type[i])
# sys.exit()
if e2ds_count == 0:
print("ERROR in read_ESPRESSO_S2D: The input folder (%s) does not contain files ending in e2ds.fits." % inpath)
sys.exit()
if sci_count == 0:
print("ERROR in read_ESPRESSO_S2D: The input folder (%2) contains e2ds files, but none of them are classified as SCIENCE frames with the HIERARCH ESO DPR CATG/OBS-TYPE keyword.")
print("These are the files and their types:")
for i in range(len(type)):
print(' '+framename[i]+' %s' % type[i])
sys.exit()
if np.max(np.abs(norders-norders[0])) == 0:
norders=int(norders[0])
else:
print("ERROR in read_ESPRESSO_S2D: Not all files have the same number of orders.")
print("These are the files and their number of orders:")
for i in range(len(type)):
print(' '+framename[i]+' %s' % norders[i])
sys.exit()
if np.max(np.abs(npx-npx[0])) == 0:
npx=int(npx[0])
else:
print("ERROR IN read_ESPRESSO_S2D: Not all files have the same number of pixels.")
print("These are the files and their number of pixels:")
for i in range(len(type)):
print(' '+framename[i]+' %s' % npx[i])
sys.exit()
if np.max(np.abs(nrv-nrv[0])) == 0:
nrv=int(nrv[0])
else:
print("ERROR IN read_ESPRESSO_S2D: Not all files have the same number of pixels.")
print("These are the files and their number of pixels:")
for i in range(len(type)):
print(' '+framename[i]+' %s' % npx[i])
sys.exit()
# print(wave[0][0,:]-wave[1][0,:])
# print(wave1d[0]-wave1d[2])
wave=wave[0]#SELECT ONLY THE FIRST WAVE FRAME. The rest is ignored.
wave1d=wave1d[0]
# else:
# if nowave == False:
# print("ERROR in read_HARPS_e2ds: No wave_A.fits file was detected.")
# print("These are the files in the folder:")
# for i in range(N):
# print(filelist[i])
# print("This may have happened if you downloaded the HARPS data from the")
# print("ADP query form, which doesn't include wave_A files (as far as I")
# print("have seen). Set the /nowave keyword in your call to read_HARPS_e2ds")
# print("if you indeed do not expect a wave_A file to be present.")
# if nowave == True:
# if all(x == wavefile_used[0] for x in wavefile_used):
# print("Nowave is set, and simple wavelength calibration extraction")
# print("works, as all files in the dataset used the same wave_A file.")
# wave=wave[0]
# else:
# print("ERROR IN read_HARPS_e2ds: Nowave is set, but not all files")
# print("in the dataset used the same wave_A file when the pipeline was")
# print("run. Catching this requres an interpolation step that is currently")
# print("not yet implemented. Exiting. These are the filenames and their")
# print("wave_A file used:")
# for i in range(N-1):
# print(' '+framename[i]+' %s' % wavefile_used[0])
# wave=wave[0]
# print("I ALLOW YOU TO CONTINUE BUT USING ONLY THE FIRST WAVELENGTH")
# print("SOLUTION. A PART OF THE DATA MAY BE AFFECTED BY HAVING ASSUMED")
# print("THE WRONG SOLUTION. If you are doing transits, you don't need")
# print("this kind of precision.")
if np.shape(wave) != np.shape(e2ds[0]):
print("ERROR in read_ESPRESSO_S2D: A wave file was detected but its shape (%s,%s) does not match that of the orders (%s,%s)" % (np.shape(wave)[0],np.shape(wave)[1],np.shape(e2ds[0])[0],np.shape(e2ds[0])[1]))
if len(s1dhdr) != len(e2ds) and molecfit == True:
        print('ERROR in read_ESPRESSO_S2D: The number of s1d SCIENCE files and e2ds SCIENCE files is not the same. (%s vs %s)' % (len(s1dhdr),len(e2ds)))
print('Switching off the molecfit option will suppress this error.')
#Ok, so now we should have ended up with a number of lists that contain all
#the relevant information of our science frames.
#We determine how to sort the resulting lists in time:
sorting = np.argsort(mjd)
ccfsorting = np.argsort(ccfmjd)
s1dsorting = np.argsort(s1dmjd)
#First sort the s1d files for application of molecfit.
if molecfit == True:
s1dhdr_sorted=[]
s1d_sorted=[]
for i in range(len(s1dsorting)):
s1dhdr_sorted.append(s1dhdr[s1dsorting[i]])
s1d_sorted.append(s1d[s1dsorting[i]])
# print(s1dhdr_sorted[0])
#
# f=open('ILOVEHEADERS','w')
# for k in s1dhdr_sorted[0]:
# f.write(str(k)+'\n')
# f.close()
#
# sys.exit()
list_of_wls,list_of_trans = mol.do_molecfit(s1dhdr_sorted,s1d_sorted,wave=wave1d,load_previous=False,mode=mode)
mol.write_telluric_transmission_to_file(list_of_wls,list_of_trans,outpath+'telluric_transmission_spectra.pkl')
ccftotal = 0.0
#Now we loop over all exposures and collect the i-th order from each exposure,
#put these into a new matrix and save them to FITS images:
f=open(outpath+'obs_times','w',newline='\n')
    headerline = 'MJD'+'\t'+'DATE'+'\t'+'EXPTIME'+'\t'+'MEAN AIRMASS'+'\t'+'BERV (km/s)'+'\t'+'FILE NAME'
    f.write(headerline+'\n')
for i in range(norders):
order = np.zeros((sci_count,npx))
ccforder = np.zeros((ccf_count,nrv))
wave_axis = wave[i,:]/10.0#Convert to nm.
print('CONSTRUCTING ORDER %s' % i)
c = 0#To count the number of science frames that have passed. The counter
# c is not equal to j because the list of files contains not only SCIENCE
# frames.
cc = 0#Same for ccfs
for j in range(len(ccfsorting)):
ccf=ccfs[ccfsorting[j]]
ccforder[cc,:] = ccf[i,:]
cc+=1
for j in range(len(sorting)):#Loop over exposures
if i ==0:
print('---'+type[sorting[j]]+' '+date[sorting[j]])
if type[sorting[j]] == 'SCIENCE':#This check may be redundant.
exposure = e2ds[sorting[j]]
order[c,:] = exposure[i,:]
#T_i = interp.interp1d(list_of_wls[j],list_of_trans[j])#This should be time-sorted, just as the e2ds files.
#Do a manual check here that the MJDs are identical.
                #Also, determine what to do with airtovac.
#tel_order[c,:] = T_i[wave_axis]
#Now I also need to write it to file.
if i ==0:#Only do it the first time, not for every order.
line = str(mjd[sorting[j]])+'\t'+date[sorting[j]]+'\t'+str(texp[sorting[j]])+'\t'+str(airmass[sorting[j]])+'\t'+str(berv[sorting[j]])+'\t'+framename[sorting[j]]+'\n'
f.write(line)
c+=1
ccftotal+=ccforder
fits.writeto(outpath+'ccf_'+str(i)+'.fits',ccforder,overwrite=True)
fits.writeto(outpath+'order_'+str(i)+'.fits',order,overwrite=True)
fits.writeto(outpath+'wave_'+str(i)+'.fits',wave_axis,overwrite=True)
fits.writeto(outpath+'ccftotal.fits',ccftotal,overwrite=True)
f.close()
print('Time-table written to '+outpath+'obs_times')
print('WARNING: FORMATTING IS STILL SCREWED UP!')
print('FIGURE OUT HOW TO FORMAT THOSE LINES IN A MORE HUMAN READABLE WAY')
print('WHEN YOU HAVE INTERNET AGAIN.')
def read_ESPRESSO_S2D_JULIA(inpath,outname,air=True,molecfit=True):
"""
reads in the ESPRESSO files and prepares them for use in molecfit
    This function then calls do_molecfit from the molecfit module.
input:
inpath: type: string, path to the s2d ESPRESSO files
    outname: type: string, path to where the telluric correction should be saved
air: type:boolean, is the wavelength in air or vacuum
molecfit: type: boolean, function can be run with or without starting molecfit
note for Jens: The wavelength is much easier in ESPRESSO compared to HARPS, so I kicked out
all the parts needed for that in the HARPS function
Since Romain has for some reason only given me the S2D files and not the S1D files (God knows why),
    and also only fiber A (again, God knows why), this function only does molecfit on the sodium orders.
Author: Julia V. Seidel.
"""
import os
import pdb
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import sys
import utils as ut
import molecfit as mol
import pyfits
import copy
import scipy.interpolate as interp
import pickle
#First check the input:
ut.typetest('inpath in read_ESPRESSO_S2D ',inpath,str)
ut.typetest('outname in read_ESPRESSO_S2D ',outname,str)
ut.typetest('air in read_ESPRESSO_S2D ',air,bool)
if os.path.exists(inpath) != True:
print("ERROR in read_ESPRESSO_S2D: Data input path (%s) does not exist." % inpath)
sys.exit()
filelist=os.listdir(inpath)
N=len(filelist)
if len(filelist) == 0:
print("ERROR in read_ESPRESSO_S2D: input folder (%s) is empty." % inpath)
sys.exit()
#The following variables define the lists in which all the necessary data will be stored.
framename=[]
header=[]
type=[]
texp=np.array([])
date=[]
npx=np.array([])
norders=np.array([])
mjd=np.array([])
ccfmjd=np.array([])
s1dmjd=np.array([])
s2d=[]
airmass=np.array([])
berv=np.array([])
wave=[]
outpath = ut.path(outname)
if os.path.exists(outpath) != True:
os.makedirs(outpath)
#ccftotal = 0 #This will hold the sum of the CCFs
s2d_count = 0
sci_count = 0
for i in range(N):
if filelist[i].endswith('S2D_A.fits'):
s2d_count += 1
print(filelist[i])
#data,hdr=fits.getdata(inpath+filelist[i],header=True)
hdul = fits.open(inpath+filelist[i])
data = copy.deepcopy(hdul[1].data)
hdr1 = hdul[0].header
hdr2 = hdul[1].header
wavedata=copy.deepcopy(hdul[5].data)
wave.append(wavedata)
hdul.close()
del hdul[0].data
if hdr2['EXTNAME'] == 'SCIDATA':
framename.append(filelist[i])
header.append(hdr1)
type.append(hdr2['EXTNAME'])
texp=np.append(texp,hdr1['EXPTIME'])
date.append(hdr1['DATE-OBS'])
mjd=np.append(mjd,hdr1['MJD-OBS'])
npx=np.append(npx,hdr2['NAXIS1'])
norders=np.append(norders,hdr2['NAXIS2'])
s2d.append(data)
s2d_count += 1
sci_count += 1
berv=np.append(berv,hdr1['HIERARCH ESO QC BERV'])
airmass=np.append(airmass,0.5*(hdr1['HIERARCH ESO TEL3 AIRM START']+hdr1['HIERARCH ESO TEL3 AIRM END']))
#Now we catch some errors:
#-The above should have read a certain number of s2d files.
#-A certain number of these should be SCIENCE frames.
#-There should be at least one WAVE file.
#-All exposures should have the same number of spectral orders.
#-All orders should have the same number of pixels (this is true for ESPRESSO).
#-The wave frame should have the same dimensions as the order frames.
#-If nowave is set, test that all frames used the same wave_A calibrator.
#-The blaze file needs to have the same shape as the s2d files.
#-The number of s1d files should be the same as the number of s2d files.
# if s2d_count != s1d_count:
# # print('ERROR in read_ESPRESSO_s2d: The numbers of 1ds and s2d files are different.')
# # print("These are the files and their types:")
# # for i in range(len(type)):
# # print(' '+framename[i]+' %s' % type[i])
# # sys.exit()
# if s2d_count == 0:
# print("ERROR in read_ESPRESSO_s2d: The input folder (%s) does not contain files ending in s2d.fits." % inpath)
# sys.exit()
# if sci_count == 0:
# print("ERROR in read_ESPRESSO_s2d: The input folder (%2) contains s2d files, but none of them are classified as SCIENCE frames with the HIERARCH ESO DPR CATG keyword.")
# print("These are the files and their types:")
# for i in range(len(type)):
# print(' '+framename[i]+' %s' % type[i])
# sys.exit()
# if np.max(np.abs(norders-norders[0])) == 0:
# norders=int(norders[0])
# else:
# print("ERROR in read_ESPRESSO_s2d: Not all files have the same number of orders.")
# print("These are the files and their number of orders:")
# for i in range(len(type)):
# print(' '+framename[i]+' %s' % norders[i])
# sys.exit()
# if np.max(np.abs(npx-npx[0])) == 0:
# npx=int(npx[0])
# else:
# print("ERROR IN read_ESPRESSO_s2d: Not all files have the same number of pixels.")
# print("These are the files and their number of pixels:")
# for i in range(len(type)):
# print(' '+framename[i]+' %s' % npx[i])
# sys.exit()
# if np.max(np.abs(nrv-nrv[0])) == 0:
# nrv=int(nrv[0])
# else:
# print("ERROR IN read_ESPRESSO_s2d: Not all files have the same number of pixels.")
# print("These are the files and their number of pixels:")
# for i in range(len(type)):
# print(' '+framename[i]+' %s' % npx[i])
# sys.exit()
#
# if len(s1dhdr) != len(s2d) and molecfit == True:
# print('ERROR in read_ESPRESSO_s2d: The number of s1d SCIENCE files and s2d SCIENCE files is not the same. (%s vs %s)' % (len(s1dhdr),len(s2d)))
# print('Switching off the molecfit option will suppress this error.')
#Ok, so now we should have ended up with a number of lists that contain all
#the relevant information of our science frames.
#We determine how to sort the resulting lists in time:
# sorting = np.argsort(mjd)
s2dsorting = np.argsort(mjd)
#First sort the s1d files for application of molecfit.
if molecfit == True:
s2dhdr_sorted=[]
s2d_sorted=[]
wave_sorted=[]
for i in s2dsorting:
s2dhdr_sorted.append(header[i])
s2d_sorted.append(s2d[i])
wave_sorted.append(wave[i])
# print('Molecfit will be executed onto the files in this order:')
# for x in s1dhdr_sorted:
# print(x['DATE-OBS'])
list_of_wls,list_of_trans = mol.do_molecfit(s2dhdr_sorted,s2d_sorted,mode='ESPRESSO',load_previous=False,order=116,wave=wave_sorted)
mol.write_telluric_transmission_to_file(list_of_wls,list_of_trans,outpath+'telluric_transmission_spectra.pkl')
# ccftotal = 0.0
#Now we loop over all exposures and collect the i-th order from each exposure,
#put these into a new matrix and save them to FITS images:
f=open(outpath+'obs_times','w',newline='\n')
    headerline = 'MJD'+'\t'+'DATE'+'\t'+'EXPTIME'+'\t'+'MEAN AIRMASS'+'\t'+'BERV (km/s)'+'\t'+'FILE NAME'
    f.write(headerline+'\n')
for i in range(int(norders[0])):
order = np.zeros((sci_count,int(npx[0])))
wave_axis = wave[0][i]/10.0#Convert to nm.
# ccforder = np.zeros((ccf_count,nrv))
print('CONSTRUCTING ORDER %s' % i)
c = 0#To count the number of science frames that have passed. The counter
# c is not equal to j because the list of files contains not only SCIENCE
# frames.
# cc = 0#Same for ccfs
# for j in range(len(ccfsorting)):
# ccf=ccfs[ccfsorting[j]]
# ccforder[cc,:] = ccf[i,:]
# cc+=1
for j in range(len(s2dsorting)):#Loop over exposures
if i ==0:
print('---'+type[s2dsorting[j]]+' '+date[s2dsorting[j]])
exposure = s2d[s2dsorting[j]]
order[c,:] = exposure[i,:]
#T_i = interp.interp1d(list_of_wls[j],list_of_trans[j])#This should be time-sorted, just as the s2d files.
#Do a manual check here that the MJDs are identical.
            #Also, determine what to do with airtovac.
#tel_order[c,:] = T_i[wave_axis]
#Now I also need to write it to file.
if i ==0:#Only do it the first time, not for every order.
line = str(mjd[s2dsorting[j]])+'\t'+date[s2dsorting[j]]+'\t'+str(texp[s2dsorting[j]])+'\t'+str(airmass[s2dsorting[j]])+'\t'+str(berv[s2dsorting[j]])+'\t'+framename[s2dsorting[j]]+'\n'
f.write(line)
c+=1
# ccftotal+=ccforder
# fits.writeto(outpath+'ccf_'+str(i)+'.fits',ccforder,overwrite=True)
fits.writeto(outpath+'order_'+str(i)+'.fits',order,overwrite=True)
fits.writeto(outpath+'wave_'+str(i)+'.fits',wave_axis,overwrite=True)
# fits.writeto(outpath+'ccftotal.fits',ccftotal,overwrite=True)
f.close()
print('Time-table written to '+outpath+'obs_times')
|
"""
The script exports the answer table to a CSV file.
Config
------
CFG_NAME : The config name; can be Development, Staging, or Testing
Output
------
The total number of answers exported, and the CSV file (answer_YYYY_MM_DD_HH_mm_ss.csv).
"""
CFG_NAME = "config.config.DevelopmentConfig"
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import csv
from models.model import db
from models.model import Answer
from models.model_operations import location_operations
from models.model_operations import answer_operations
from models.model_operations import user_operations
from config.config import Config
from flask import Flask
from controllers import root
import datetime
# init db
app = Flask(__name__)
app.register_blueprint(root.bp)
app.config.from_object(CFG_NAME)
db.init_app(app)
app.app_context().push()
cvs_file_name = "answer_" + datetime.datetime.today().strftime("%Y_%m_%d_%H_%M_%S") + ".csv"
print("Exporting answers to " + cvs_file_name + "...")
# Get all answers
answer_query = Answer.query.order_by(Answer.user_id)
answers = answer_query.all()
with open(cvs_file_name, "w", newline="") as csvDataFile:
# Write header
csvWriter = csv.writer(csvDataFile, delimiter=",", quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvWriter.writerow(["Userid", "client_id", "location_id", "factory_id", "answer_id", "land_usage", "expansion", "gold_standard_status",
"year_old", "year_new", "bbox_left_top_lat", "bbox_left_top_lng", "bbox_bottom_right_lat", "bbox_bottom_right_lng",
"zoom_level", "timestamp"])
for answer in answers:
# Write each record in answer table
factory_id = location_operations.get_location_by_id(answer.location_id).factory_id
client_id = user_operations.get_user_by_id(answer.user_id).client_id
csvWriter.writerow([answer.user_id, client_id, answer.location_id, factory_id, answer.id, answer.land_usage,answer.expansion,
answer.gold_standard_status, answer.year_old, answer.year_new, answer.bbox_left_top_lat, answer.bbox_left_top_lng, answer.bbox_bottom_right_lat,
answer.bbox_bottom_right_lng, answer.zoom_level, answer.timestamp])
print("{} records reported.".format(len(answers)))
db.session.remove()
db.session.close()
|
{
'targets': [
{
'target_name': 'zopflipng',
'sources': [
'src/zopflipng.cc',
'src/zopflipng/zopflipng_lib.cc',
'src/lodepng/lodepng.cc',
'src/lodepng/lodepng_util.cc',
"src/zopfli/deflate.c",
"src/zopfli/util.c",
"src/zopfli/blocksplitter.c",
"src/zopfli/lz77.c",
"src/zopfli/hash.c",
"src/zopfli/tree.c",
"src/zopfli/katajainen.c",
"src/zopfli/cache.c",
"src/zopfli/squeeze.c"
],
'include_dirs': [
'src',
"<!@(node -p \"require('node-addon-api').include\")"
],
'dependencies': [
"<!(node -p \"require('node-addon-api').gyp\")"
],
'cflags': [
'-fno-exceptions',
'-Wextra',
'-Wall',
'-Wno-unused-function',
'-Wno-unused-parameter',
'-ansi',
'-pedantic',
'-O3',
'-flto',
],
'cflags_cc': [
'-fno-exceptions',
'-Wextra',
'-Wall',
'-std=c++11',
'-pedantic',
'-O3',
'-flto',
],
'ldflags': [
'-flto'
],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
'OTHER_CFLAGS': [
'-fno-exceptions',
'-Wextra',
'-Wall',
'-Wno-unused-function',
'-Wno-unused-parameter',
'-ansi',
'-pedantic',
'-O3',
'-flto'
],
'OTHER_CPLUSPLUSFLAGS': [
'-fno-exceptions',
'-Wextra',
'-Wall',
'-std=c++11',
'-pedantic',
'-O3',
'-flto'
],
'OTHER_LDFLAGS': [
'-flto'
]
},
'msvs_settings': {
'VCCLCompilerTool': { 'ExceptionHandling': 1 },
},
"defines": [
"NAPI_DISABLE_CPP_EXCEPTIONS",
"NAPI_VERSION=3",
"NDEBUG"
],
}
]
}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division  # Float division by default - integer division can still be done explicitly with '//'
from numbers import Number
import math
from pyecs import *
from pyecs.components import Pose
class Size(Component):
"""docstring for Size"""
def __init__(self, size, *args,**kwargs):
super(Size, self).__init__(*args,**kwargs)
self.size = size
@with_components(required=[Pose])
def bounding_box(self, pose):
if isinstance(self.size, Number):
# one-dimensional size
return (pose.x - self.size/2, pose.y - self.size/2, self.size, self.size)
elif type(self.size) == tuple:
# two-dimensional size
return (pose.x - self.size[0]/2, pose.y - self.size[1]/2, self.size[0], self.size[1])
def bounding_radius(self):
((mx,my),r) = self.bounding_circle()
return r
def bounding_circle(self):
# get bounding box
bbox = self.bounding_box()
x,y,w,h = bbox
w2,h2 = w/2,h/2
# center of bounding box
mx,my = x+w2,y+h2
# radius of enclosing circle is distance from
# center to any of the corner points of the bbox
r = math.sqrt(w2*w2+h2*h2)
return ((mx,my),r)
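# Quick worked example (illustrative numbers only): a Size of (4, 3) centred on a
# Pose at the origin has half-extents (2, 1.5), so bounding_circle() returns the
# centre (0, 0) with radius sqrt(2**2 + 1.5**2) = 2.5.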
|
from rofl.functions.functions import *
from rofl.utils.bulldozer import composeMultiDiscrete, decomposeMultiDiscrete, decomposeObsWContextv0
from .base import QValue, construcConv, construcLinear, forwardConv, forwardLinear, layersFromConfig
class dqnAtari(QValue):
name = 'deep Qnetwork atari'
def __init__(self, config):
super().__init__(config)
self.noLinear = F.relu
lHist = config["agent"]["lhist"]
actions = config["policy"]["n_actions"]
obsShape = config["env"]["obs_shape"]
self.frameShape = (lHist, *obsShape)
self.outputs = actions
self.configLayers = layers = layersFromConfig(config)
self.features, _ = construcConv(self, obsShape, lHist, *layers['conv2d'])
construcLinear(self, self.features, actions, *layers['linear'])
def forward(self, obs):
x = forwardConv(self, obs)
x = x.flatten(1)
return forwardLinear(self, x)
class dqnAtariDueling(dqnAtari):
def __init__(self, config):
super().__init__(config)
actions = config["policy"]["n_actions"]
linearLayers = self.configLayers['linear']
self.linearOffset = offset = len(linearLayers) + 1
construcLinear(self, self.features, actions, *linearLayers, offset = offset)
def forward(self, obs):
x = forwardConv(self, obs)
x = x.flatten(1)
xVal = x.clone()
offset = self.linearOffset
xA = forwardLinear(self, x, offsetEnd = offset)
xVal = forwardLinear(self, x, offsetBeg = offset)
Amean = Tmean(xA, dim=1, keepdim=True)
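        # Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); subtracting
        # the mean advantage keeps the value/advantage decomposition identifiable.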
return xVal + (xA - Amean)
class dqnCA(QValue):
name = 'dqn CA w/ support channels'
def __init__(self, config):
super().__init__(config)
self.noLinear = F.relu
actions = config["policy"]["n_actions"]
self.actionSpace = config['env']['action_space']
obsShape = config["env"]["obs_shape"]
lHist = config["agent"]["lhist"]
channels = config['agent'].get('channels', 1)
self.frameShape = (lHist * channels, *obsShape)
self.outputs = actions
self.configLayers = layers = layersFromConfig(config)
self.features, _ = construcConv(self, obsShape, lHist * channels, *layers['conv2d'])
construcLinear(self, self.features + 3, actions, *layers['linear'])
def forward(self, observation):
frame, context = decomposeObsWContextv0(observation, self.frameShape)
x = forwardConv(self, frame)
x = Tcat([x.flatten(1), context], dim = 1)
return forwardLinear(self, x)
def processAction(self, action):
return composeMultiDiscrete(action, self.actionSpace)
def unprocessAction(self, action, batch: bool):
return decomposeMultiDiscrete(action, self.actionSpace, batch, self.device)
class dqnCADueling(dqnCA):
name = 'dqn CA dueling w/ support channels'
def __init__(self, config):
super().__init__(config)
actions = config["policy"]["n_actions"]
linearLayers = self.configLayers['linear']
self.linearOffset = offset = len(linearLayers) + 1
construcLinear(self, self.features + 3, actions, *linearLayers, offset = offset)
def forward(self, observation):
frame, context = decomposeObsWContextv0(observation, self.frameShape)
x = forwardConv(self, frame)
x = Tcat([x.flatten(1), context], dim = 1)
xVal = x.clone()
offset = self.linearOffset
xA = forwardLinear(self, x, offsetEnd = offset)
xVal = forwardLinear(self, x, offsetBeg = offset)
Amean = Tmean(xA, dim=1, keepdim=True)
return xVal + (xA - Amean)
## TO BE DELETED ## TODO
class forestFireDQNres(QValue):
def __init__(self, config):
super(forestFireDQNres, self).__init__()
lHist = config["agent"]["lhist"]
actions = config["policy"]["n_actions"]
obsShape = config["env"]["obs_shape"]
self.config= config
self.lHist = lHist
self.outputs = actions
self.obsShape = obsShape[:2]
self.channels = chls = obsShape[2]
self.rectifier = F.relu
nCh = lHist * chls
self.cv1 = nn.Conv2d(nCh, 256, 3, 1, padding = 1)
self.bn1 = nn.BatchNorm2d(256)
dim = sqrConvDim(obsShape[0], 3, 1)
self.cv2 = nn.Conv2d(256, 256, 3, 1, padding = 1)
self.bn2 = nn.BatchNorm2d(256)
dim = sqrConvDim(dim, 3, 1)
self.cv3 = nn.Conv2d(256, 256, 3, 1, padding = 1)
self.bn3 = nn.BatchNorm2d(256)
self.cv4 = nn.Conv2d(256, 2, 1, 1)
self.bn4 = nn.BatchNorm2d(2)
dim = sqrConvDim(dim, 1, 1)
self.fc1 = nn.Linear(2 * obsShape[0] * obsShape[1], actions)
def forward(self, obs):
# TODO
obs = obs.reshape(obs.shape[0], obs.shape[1] * obs.shape[4], obs.shape[2], obs.shape[3])
r = self.rectifier
x0 = r(self.bn1(self.cv1(obs)))
#residual block
x = r(self.bn2(self.cv2(x0)))
x = r(self.bn3(self.cv3(x)) + x0)
# output
x = r(self.bn4(self.cv4(x)))
return self.fc1(x.flatten(1))
|
from open_publishing.assets import AssetLink
class Assets(object):
def __init__(self,
context):
self._ctx = context
def link(self, file_id):
return AssetLink(self._ctx, file_id)
def enqueue_import(self, filename, alias = None):
self._ctx.gjp.enqueue_import(filename, alias)
|
from django.shortcuts import render
# Create your views here.
from django.http import JsonResponse
def cartDetailAdd(req):
res=JsonResponse({
})
res['Access-Control-Allow-Origin']='*'
return res
def cartDetailList(req):
res=JsonResponse({
})
res['Access-Control-Allow-Origin']='*'
return res
def cartDetailDelete(req):
res=JsonResponse({
})
res['Access-Control-Allow-Origin']='*'
return res
def cartDetailUpdate(req):
res=JsonResponse({
})
res['Access-Control-Allow-Origin']='*'
return res
|
def add_root_indicator_segment():
root_indicators = {
'bash': ' \\$',
'zsh': ' %#',
'bare': ' $',
}
bg = Color.CMD_PASSED_BG
fg = Color.CMD_PASSED_FG
powerline.append(root_indicators[powerline.args.shell], fg, bg)
add_root_indicator_segment()
|
import json
from django.contrib import admin
from haystack.utils.highlighting import Highlighter
from haystack.forms import SearchForm
from haystack.generic_views import SearchView
from haystack.query import SearchQuerySet
from haystack.query import SQ
from haystack.inputs import AutoQuery
from ajax_select.lookup_channel import LookupChannel
import uuid
from django.utils.safestring import mark_safe
from django.urls import reverse
from django.views.generic.base import TemplateResponseMixin
from django import forms
#Delete View
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.utils import get_deleted_objects, unquote
from django.db import models, router, transaction
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import (
override as translation_override, string_concat, ugettext as _, ungettext,
)
from django.contrib.auth import get_permission_codename
TO_FIELD_VAR = '_to_field'
IS_POPUP_VAR = '_popup'
from guardian.shortcuts import get_perms
from imagekit.cachefiles.backends import CachedFileBackend
import apps.common.functions as commonfunctions
from mptt.admin import DraggableMPTTAdmin, JS
from django.core.serializers.json import DjangoJSONEncoder
class DeletedListFilter(admin.SimpleListFilter):
title = 'deleted'
parameter_name = 'deleted'
def lookups(self, request, model_admin):
return (('1','Yes'),('0','No'))
def choices(self, cl):
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == lookup,
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(deleted=1)
elif self.value() == '0':
return queryset.filter(deleted=0)
        elif self.value() is None:
return queryset.filter(deleted=0)
class UUIDLookupChannel(LookupChannel):
def get_objects(self, ids):
"""
This is used to retrieve the currently selected objects for either ManyToMany or ForeignKey.
Args:
ids (list): list of primary keys
Returns:
list: list of Model objects
"""
if self.model._meta.pk.remote_field is not None:
# Use the type of the field being referenced
pk_type = self.model._meta.pk.target_field.to_python
else:
pk_type = self.model._meta.pk.to_python
# Return objects in the same order as passed in here
ids = [pk_type(pk) for pk in ids]
# uuid_to_id = []
# for id in ids:
# if int(id).bit_length() > 63:
# user = self.model.objects.get(uuid=uuid.UUID(id))
# uuid_to_id.append(str(user.uuid))
# else:
# uuid_to_id.append(id)
# ids = uuid_to_id
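        # Descriptive note: in_bulk() keys its result by the model's native pk type
        # (e.g. UUID objects), so both the ids and the result keys are normalised to
        # strings below, preserving the order of the ids passed in.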
idcount = 0
for id in ids:
ids[idcount] = str(id)
idcount += 1
things = self.model.objects.in_bulk(ids)
for thing in things:
things[str(thing)] = things.pop(thing)
return [things[aid] for aid in ids if aid in things]
class ModelAdminOverwrite():
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
#Redo Perms Needed
if obj.deleted == False:
perms_needed = set()
for obj in [obj]:
add = True
opts = obj._meta
p = '%s.%s' % (opts.app_label,
get_permission_codename('trash', opts))
p_short = '%s' % (get_permission_codename('trash', opts))
if request.user.has_perm(p):
add = False
if p_short in get_perms(request.user,obj):
add = False
if add == True:
perms_needed.add(opts.verbose_name)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
class EditLinkToInlineObject(object):
def edit_link(self, instance):
url = reverse('admin:%s_%s_change' % (
instance._meta.app_label, instance._meta.model_name), args=[instance.pk] )
if instance.pk:
return mark_safe(u'<a class="editlink" href="{u}">edit</a>'.format(u=url))
else:
return ''
class LinkToInlineObject(object):
def copy_link(self, instance):
if instance.pk:
return mark_safe(u'<span class="linkto md-linkvariant" data-clipboard-text="https://{s}{u}" data-href="{u}"></span>'.format(s=instance.site.domain, u=instance.url))
else:
return ''
class CustomSearchForm(SearchForm):
site = forms.CharField(
widget=forms.HiddenInput,
required=False,
)
def search(self):
if not self.is_valid():
return self.no_query_found()
if not self.cleaned_data.get('q'):
return self.no_query_found()
q = self.cleaned_data['q']
sqs = SearchQuerySet().filter(SQ(content=AutoQuery(q)) | SQ(url=AutoQuery(q)) | SQ(node_type=AutoQuery(q)) | SQ(content_type=AutoQuery(q)))
if self.cleaned_data['site']:
sqs = sqs.filter(site=self.cleaned_data['site'])
if self.load_all:
sqs = sqs.load_all()
return sqs
class CustomSearchView(SearchView, TemplateResponseMixin):
form_class=CustomSearchForm
def get_template_names(self):
template_name='cmstemplates/{0}/pagelayouts/search-results.html'.format(
self.request.site.dashboard_general_site.template.namespace
)
return [template_name]
class Simple(CachedFileBackend):
"""
The most basic file backend. The storage is consulted to see if the file
exists. Files are generated synchronously.
"""
def generate(self, file, force=False):
exists = self.exists(file)
sourceexists = file.generator.source.storage.exists(
file.generator.source.name
)
if not sourceexists:
return
if not exists:
self.generate_now(file, force=True)
else:
self.generate_now(file, force=force)
def _exists(self, file):
return bool(file.storage.exists(file.name))
exists = _exists
class JustInTime(object):
"""
A strategy that ensures the file exists right before it's needed.
"""
def on_existence_required(self, file):
file.generate()
def on_content_required(self, file):
file.generate()
def on_source_saved(self, file):
commonfunctions.silentdelete_media(file.path)
file.generate(force=True)
class MyDraggableMPTTAdmin(DraggableMPTTAdmin):
def changelist_view(self, request, *args, **kwargs):
if request.is_ajax() and request.POST.get('cmd') == 'move_node':
return self._move_node(request)
response = super(DraggableMPTTAdmin, self).changelist_view(
request, *args, **kwargs)
try:
response.context_data['media'].add_css({'all': (
'mptt/draggable-admin.css',
)})
response.context_data['media'].add_js((
JS('mptt/draggable-admin.js', {
'id': 'draggable-admin-context',
'data-context': json.dumps(
self._tree_context(request), cls=DjangoJSONEncoder
),
}),
),)
except (AttributeError, KeyError):
# Not meant for us if there is no context_data attribute (no
# TemplateResponse) or no media in the context.
pass
return response
|
"""
This tests: (1) automatic creation of configuration properties; and
(2) assignment of default values that are specified in the schema and, in
some cases, are also processed at load time (paths resolved, csv strings
converted to lists, etc).
This module will test ALL schema properties, unless they are listed in the
global DO_NOT_TEST. Whenever a property's default value is changed (edits to
schema or configuration loading procedures), this test code must be modified to
reflect that change.
Test assumptions for a default configuration:
- If a default is set and not modified at load time, expect schema default.
- If a default is not set, expect null.
- If a default is set and modified at load time, the test should reflect that
(if a default is specified in the schema, it is expected that it will be used
in some form at load time; otherwise it should not be listed as a default).
Configuration options NOT tested:
- config_dir (value overridden for testing)
- data_dir (value overridden for testing)
- managed_config_dir (value depends on config_dir: see note above)
- new_file_path (value overridden for testing)
- logging (mapping loaded in config/; TODO)
- dependency_resolution (nested properties; TODO)
- job_config (no obvious testable defaults)
"""
import os
from collections import namedtuple
from datetime import timedelta
import pytest
from galaxy.util import listify
from galaxy_test.driver.driver_util import GalaxyTestDriver
OptionData = namedtuple('OptionData', ('key', 'expected', 'loaded'))
# Configuration properties that are paths should be absolute paths, by default resolved w.r.t root.
PATH_CONFIG_PROPERTIES = [
# For now, these include base config properties
'root',
'config_file',
'config_dir',
'managed_config_dir',
'data_dir',
'auth_config_file',
'blacklist_file',
'builds_file_path',
'citation_cache_data_dir',
'citation_cache_lock_dir',
'cluster_files_directory',
'containers_resolvers_config_file',
'data_manager_config_file',
'datatypes_config_file',
'dependency_resolvers_config_file',
'file_path',
'ftp_upload_dir',
'galaxy_data_manager_data_path',
'integrated_tool_panel_config',
'interactive_environment_plugins_directory',
'involucro_path',
'job_config_file',
'job_resource_params_file',
'job_working_directory',
'len_file_path',
'library_import_dir',
'markdown_export_css',
'markdown_export_css_pages',
'markdown_export_css_invocation_reports',
'migrated_tools_config',
'new_file_path',
'nginx_upload_job_files_path',
'nginx_upload_job_files_store',
'nginx_upload_path',
'object_store_config_file',
'oidc_backends_config_file',
'oidc_config_file',
'openid_consumer_cache_path',
'sanitize_whitelist_file',
'shed_data_manager_config_file',
'shed_tool_config_file',
'shed_tool_data_path',
'shed_tool_data_table_config',
'template_cache_path',
'tool_data_path',
'tool_dependency_cache_dir',
'tool_path',
'tool_sheds_config_file',
'user_preferences_extra_conf_path',
'webhooks_dir',
'workflow_resource_params_file',
'workflow_resource_params_mapper',
'workflow_schedulers_config_file',
]
# TODO: fix or mark as not absolute (2 are lists):
# - 'tool_config_file',
# - 'tool_data_table_config_path',
# - 'tool_dependency_dir',
# - 'tool_test_data_directories',
# - 'tour_config_dir',
# - 'visualization_plugins_directory',
# Most of these (except root_dir) will go away once path_resolves_to is set in the schema
RESOLVE = {
'auth_config_file': 'config_dir',
'builds_file_path': 'tool_data_path',
'dependency_resolvers_config_file': 'config_dir',
'integrated_tool_panel_config': 'managed_config_dir',
'involucro_path': 'root_dir',
'job_resource_params_file': 'config_dir',
'len_file_path': 'tool_data_path',
'object_store_config_file': 'config_dir',
'oidc_backends_config_file': 'config_dir',
'oidc_config_file': 'config_dir',
'sanitize_whitelist_file': 'root_dir',
'shed_data_manager_config_file': 'managed_config_dir',
'shed_tool_config_file': 'managed_config_dir',
'shed_tool_data_path': 'tool_data_path',
'shed_tool_data_table_config': 'managed_config_dir',
'tool_data_path': 'root_dir',
'tool_path': 'root_dir',
'tool_sheds_config_file': 'config_dir',
'user_preferences_extra_conf_path': 'config_dir',
'workflow_resource_params_file': 'config_dir',
'workflow_schedulers_config_file': 'config_dir',
}
def expected_default_config_dir(value):
# expected absolute path to the default config dir (when NO galaxy.yml provided)
return os.path.join(DRIVER.app.config.root, 'lib', 'galaxy', 'config', 'sample')
CUSTOM = {
'config_dir': expected_default_config_dir,
'password_expiration_period': timedelta,
'toolbox_filter_base_modules': listify,
'mulled_channels': listify,
'user_library_import_symlink_whitelist': listify,
'tool_filters': listify,
'tool_label_filters': listify,
'tool_section_filters': listify,
'persistent_communication_rooms': listify,
}
# TODO: split into (1) do not test; and (2) todo: fix and test
DO_NOT_TEST = [
'admin_users', # may or may not be testable: special test value assigned
'allow_user_deletion', # broken: default overridden
'amqp_internal_connection', # may or may not be testable; refactor config/
'api_allow_run_as', # may or may not be testable: test value assigned
'build_sites_config_file', # broken: remove 'config/' prefix from schema
'chunk_upload_size', # broken: default overridden
'cleanup_job', # broken: default overridden
'conda_auto_init', # broken: default overridden
'config_dir', # value overridden for testing
'data_dir', # value overridden for testing
'data_manager_config_file', # broken: remove 'config/' prefix from schema
'database_connection', # untestable; refactor config/__init__ to test
'database_engine_option_max_overflow', # overridden for tests running on non-sqlite databases
    'database_engine_option_pool_size',  # overridden for tests running on non-sqlite databases
'database_template', # default value set for tests
'datatypes_config_file', # broken
'default_locale', # broken
'dependency_resolution', # nested properties
'disable_library_comptypes', # broken: default overridden with empty string
'expose_dataset_path', # broken: default overridden
'ftp_upload_purge', # broken: default overridden
'ftp_upload_dir_template', # dynamically sets os.path.sep
'galaxy_data_manager_data_path', # broken: review config/, possibly refactor
'galaxy_infrastructure_url', # broken
'galaxy_infrastructure_web_port', # broken
'heartbeat_log', # untestable; refactor config/__init__ to test
'id_secret', # broken: default overridden
'job_config', # no obvious testable defaults
'job_config_file', # broken: remove 'config/' prefix from schema
'job_metrics_config_file',
'job_working_directory', # broken; may or may not be able to test
'library_import_dir', # broken: default overridden
'logging', # mapping loaded in config/
'managed_config_dir', # depends on config_dir: see note above
'markdown_export_css', # default not used?
'markdown_export_css_pages', # default not used?
'markdown_export_css_invocation_reports', # default not used?
'master_api_key', # broken: default value assigned outside of config/
'migrated_tools_config', # needs more work (should work)
'monitor_thread_join_timeout', # broken: default overridden
'new_file_path', # value overridden for testing
'object_store_store_by', # broken: default overridden
'pretty_datetime_format', # untestable; refactor config/__init__ to test
'retry_metadata_internally', # broken: default overridden
'statsd_host', # broken: default overridden with empty string
'template_cache_path', # may or may not be able to test; may be broken
'tool_config_file', # default not used; may or may not be testable
'tool_data_table_config_path', # broken: remove 'config/' prefix from schema
'tool_test_data_directories', # untestable; refactor config/__init__ to test
'use_remote_user', # broken: default overridden
'use_tasked_jobs', # broken: default overridden
'user_library_import_dir', # broken: default overridden
'user_tool_filters', # broken: default overridden
'user_tool_label_filters', # broken: default overridden
'user_tool_section_filters', # broken: default overridden
'webhooks_dir', # broken; also remove 'config/' prefix from schema
'workflow_resource_params_mapper', # broken: remove 'config/' prefix from schema
]
@pytest.fixture(scope='module')
def driver(request):
request.addfinalizer(DRIVER.tear_down)
return DRIVER
def create_driver():
# Same approach as in functional/test_toolbox_pytest.py:
    # We set up a global driver, so that the driver fixture can tear down the driver.
    # Ideally `create_driver` would be a fixture and clean up after the yield,
    # but that's not compatible with the use of pytest.mark.parametrize:
# a fixture is not directly callable, so it cannot be used in place of get_config_data.
global DRIVER
DRIVER = GalaxyTestDriver()
DRIVER.setup()
def get_config_data():
def load_parent_dirs():
return {
'root_dir': DRIVER.app.config.root,
'config_dir': DRIVER.app.config.config_dir,
'managed_config_dir': DRIVER.app.config.managed_config_dir,
'data_dir': DRIVER.app.config.data_dir,
'tool_data_path': DRIVER.app.config.tool_data_path,
}
def resolve(parent, child):
return os.path.join(parent, child) if child else parent
def get_expected(key, data, parent_dirs):
value = data.get('default')
parent = data.get('path_resolves_to')
if parent:
value = resolve(parent_dirs[parent], value)
if key in RESOLVE:
parent = RESOLVE[key]
value = resolve(parent_dirs[parent], value)
if key in CUSTOM:
value = CUSTOM[key](value)
return value
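    # For example (hypothetical values): a key listed in RESOLVE as
    # {'auth_config_file': 'config_dir'} with default 'auth_conf.xml' resolves to
    # os.path.join(parent_dirs['config_dir'], 'auth_conf.xml').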
create_driver() # create + setup DRIVER
parent_dirs = load_parent_dirs() # called after DRIVER is setup
items = ((k, v) for k, v in DRIVER.app.config.appschema.items() if k not in DO_NOT_TEST)
for key, data in items:
expected_value = get_expected(key, data, parent_dirs)
loaded_value = getattr(DRIVER.app.config, key)
data = OptionData(key=key, expected=expected_value, loaded=loaded_value) # passed to test
yield pytest.param(data)
def get_path_data():
for key in PATH_CONFIG_PROPERTIES:
yield key
def get_key(option_data):
return option_data.key
@pytest.mark.parametrize('data', get_config_data(), ids=get_key)
def test_config_option(data, driver):
assert data.expected == data.loaded
@pytest.mark.parametrize('data', get_path_data())
def test_is_path_absolute(data, driver):
path = getattr(DRIVER.app.config, data)
if path:
assert os.path.isabs(path)
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import json
import logging
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass
from paddle.fluid.contrib.slim.quantization import OutScaleForInferencePass
from paddle.fluid import core
from paddle.fluid.contrib.slim.quantization import WeightQuantization
from paddle.fluid.layer_helper import LayerHelper
from ..common import get_logger
_logger = get_logger(__name__, level=logging.INFO)
WEIGHT_QUANTIZATION_TYPES = [
'abs_max', 'channel_wise_abs_max', 'range_abs_max', 'moving_average_abs_max'
]
WEIGHT_QUANTIZATION_TYPES_TENSORRT = ['channel_wise_abs_max']
ACTIVATION_QUANTIZATION_TYPES = [
'abs_max', 'range_abs_max', 'moving_average_abs_max'
]
ACTIVATION_QUANTIZATION_TYPES_TENSORRT = [
'range_abs_max', 'moving_average_abs_max'
]
VALID_DTYPES = ['int8']
TRANSFORM_PASS_OP_TYPES = QuantizationTransformPass._supported_quantizable_op_type
QUANT_DEQUANT_PASS_OP_TYPES = AddQuantDequantPass._supported_quantizable_op_type
TENSORRT_OP_TYPES = [
'mul', 'conv2d', 'pool2d', 'depthwise_conv2d', 'elementwise_add',
'leaky_relu'
]
VARS_MAPPING_TABLE = './mapping_table_for_saving_inference_model'
_quant_config_default = {
# weight quantize type, default is 'channel_wise_abs_max'
'weight_quantize_type': 'channel_wise_abs_max',
# activation quantize type, default is 'moving_average_abs_max'
'activation_quantize_type': 'moving_average_abs_max',
# weight quantize bit num, default is 8
'weight_bits': 8,
# activation quantize bit num, default is 8
'activation_bits': 8,
# ops of name_scope in not_quant_pattern list, will not be quantized
'not_quant_pattern': ['skip_quant'],
# ops of type in quantize_op_types, will be quantized
'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
# data type after quantization, such as 'uint8', 'int8', etc. default is 'int8'
'dtype': 'int8',
    # window size for 'range_abs_max' quantization. default is 10000
'window_size': 10000,
# The decay coefficient of moving average, default is 0.9
'moving_rate': 0.9,
# if True, 'quantize_op_types' will be TENSORRT_OP_TYPES
'for_tensorrt': False,
    # if True, 'quantize_op_types' will be TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
'is_full_quantize': False
}
def load_dict():
with open(VARS_MAPPING_TABLE, 'r') as file:
data = file.read()
data = json.loads(data)
return data
def save_dict(table):
with open(VARS_MAPPING_TABLE, 'w') as file:
file.write(json.dumps(table))
def _parse_configs(user_config):
"""
check if user's configs are valid.
Args:
user_config(dict): user's config.
Return:
configs(dict): final configs will be used.
"""
configs = copy.deepcopy(_quant_config_default)
configs.update(user_config)
assert isinstance(configs['for_tensorrt'], bool) and isinstance(
configs['is_full_quantize'],
bool), "'for_tensorrt' and 'is_full_quantize' must both be bool'"
# check if configs is valid
if configs['for_tensorrt']:
weight_types = WEIGHT_QUANTIZATION_TYPES_TENSORRT
activation_types = ACTIVATION_QUANTIZATION_TYPES_TENSORRT
platform = 'TensorRT'
else:
weight_types = WEIGHT_QUANTIZATION_TYPES
        activation_types = ACTIVATION_QUANTIZATION_TYPES
platform = 'PaddleLite'
assert configs['weight_quantize_type'] in weight_types, \
"Unknown weight_quantize_type: {}. {} only supports {} ".format(configs['weight_quantize_type'],
platform, weight_types)
assert configs['activation_quantize_type'] in activation_types, \
"Unknown activation_quantize_type: {}. {} only supports {}".format(configs['activation_quantize_type'],
platform, activation_types)
assert isinstance(configs['weight_bits'], int), \
"weight_bits must be int value."
assert (configs['weight_bits'] >= 1 and configs['weight_bits'] <= 16), \
"weight_bits should be between 1 and 16."
assert isinstance(configs['activation_bits'], int), \
"activation_bits must be int value."
assert (configs['activation_bits'] >= 1 and configs['activation_bits'] <= 16), \
"activation_bits should be between 1 and 16."
assert isinstance(configs['not_quant_pattern'], (list, str)), \
"not_quant_pattern must be list or str"
assert isinstance(configs['quantize_op_types'], list), \
"quantize_op_types must be a list"
if configs['for_tensorrt']:
configs['quantize_op_types'] = TENSORRT_OP_TYPES
elif configs['is_full_quantize']:
configs[
'quantize_op_types'] = TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES
else:
for op_type in configs['quantize_op_types']:
assert (op_type in QUANT_DEQUANT_PASS_OP_TYPES) or (
            op_type in TRANSFORM_PASS_OP_TYPES), "{} is not supported, \
            now supported op types are {}".format(
op_type,
TRANSFORM_PASS_OP_TYPES + QUANT_DEQUANT_PASS_OP_TYPES)
assert isinstance(configs['dtype'], str), \
"dtype must be a str."
assert (configs['dtype'] in VALID_DTYPES), \
"dtype can only be " + " ".join(VALID_DTYPES)
assert isinstance(configs['window_size'], int), \
"window_size must be int value, window size for 'range_abs_max' quantization, default is 10000."
assert isinstance(configs['moving_rate'], float), \
"moving_rate must be float value, The decay coefficient of moving average, default is 0.9."
return configs
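# A minimal usage sketch (the override values below are illustrative, not defaults):
# _parse_configs({'weight_bits': 8, 'not_quant_pattern': ['skip_quant']}) returns the
# full config dict, with every key not supplied by the user taken from
# _quant_config_default above.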
def quant_aware(program,
place,
config=None,
scope=None,
for_test=False,
weight_quantize_func=None,
act_quantize_func=None,
weight_preprocess_func=None,
act_preprocess_func=None,
optimizer_func=None,
executor=None,
return_program=False):
"""Add quantization and dequantization operators to "program"
for quantization training or testing.
Args:
program(paddle.static.Program): training or testing ``program``.
place(paddle.CPUPlace or paddle.CUDAPlace): This parameter represents
the executor run on which device.
config(dict, optional): configs for quantization. if None, will use default config.
Default: None.
scope(paddle.static.Scope): Scope records the mapping between variable names and variables,
similar to brackets in programming languages. Usually users can use
`paddle.static.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_. When ``None`` will use `paddle.static.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_ . Default: ``None``.
for_test(bool): If the 'program' parameter is a test program, this parameter should be set to ``True``.
            Otherwise, set to ``False``. Default: False.
weight_quantize_func(function): Function that defines how to quantize weight. Using this
can quickly test if user's quantization method works or not. In this function, user should
both define quantization function and dequantization function, that is, the function's input
is non-quantized weight and function returns dequantized weight. If None, will use
quantization op defined by 'weight_quantize_type'.
Default is None.
act_quantize_func(function): Function that defines how to quantize activation. Using this
can quickly test if user's quantization method works or not. In this function, user should
both define quantization and dequantization process, that is, the function's input
is non-quantized activation and function returns dequantized activation. If None, will use
quantization op defined by 'activation_quantize_type'.
Default is None.
weight_preprocess_func(function): Function that defines how to preprocess weight before quantization. Using this
can quickly test if user's preprocess method works or not. The function's input
is non-quantized weight and function returns processed weight to be quantized. If None, the weight will
be quantized directly.
Default is None.
act_preprocess_func(function): Function that defines how to preprocess activation before quantization. Using this
can quickly test if user's preprocess method works or not. The function's input
is non-quantized activation and function returns processed activation to be quantized. If None, the activation will
be quantized directly.
Default is None.
        optimizer_func(function): Function that returns an optimizer. When ``for_test`` is False and the user wants
            to use a self-defined quantization function and preprocess function, this function must be set. Default is None.
        executor(paddle.static.Executor): If the user wants to use a self-defined quantization function and preprocess
            function, ``executor`` must be set for initialization. Default is None.
        return_program(bool): If the user wants the return value to be a ``Program`` rather than a ``CompiledProgram``, set this argument to True.
Default is False.
Returns:
paddle.static.CompiledProgram | paddle.static.Program: Program with quantization and dequantization ``operators``
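    Examples:
        A minimal sketch (``train_program`` is a placeholder for a user-built
        static-graph program, not part of this API):

        .. code-block:: python

            import paddle
            paddle.enable_static()
            # build or load a static-graph ``train_program`` first, then:
            quant_train_program = quant_aware(
                train_program, paddle.CPUPlace(), config=None, for_test=False)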
"""
scope = paddle.static.global_scope() if not scope else scope
if config is None:
config = _quant_config_default
else:
assert isinstance(config, dict), "config must be dict"
config = _parse_configs(config)
_logger.info("quant_aware config {}".format(config))
main_graph = IrGraph(core.Graph(program.desc), for_test=for_test)
transform_pass_ops = []
quant_dequant_ops = []
for op_type in config['quantize_op_types']:
if op_type in TRANSFORM_PASS_OP_TYPES:
transform_pass_ops.append(op_type)
elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
quant_dequant_ops.append(op_type)
if len(transform_pass_ops) > 0:
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
weight_bits=config['weight_bits'],
activation_bits=config['activation_bits'],
activation_quantize_type=config['activation_quantize_type'],
weight_quantize_type=config['weight_quantize_type'],
window_size=config['window_size'],
moving_rate=config['moving_rate'],
quantizable_op_type=transform_pass_ops,
skip_pattern=config['not_quant_pattern'],
weight_quantize_func=weight_quantize_func,
act_quantize_func=act_quantize_func,
weight_preprocess_func=weight_preprocess_func,
act_preprocess_func=act_preprocess_func,
optimizer_func=optimizer_func,
executor=executor)
transform_pass.apply(main_graph)
if len(quant_dequant_ops) > 0:
quant_dequant_pass = AddQuantDequantPass(
scope=scope,
place=place,
moving_rate=config['moving_rate'],
quant_bits=config['activation_bits'],
skip_pattern=config['not_quant_pattern'],
quantizable_op_type=quant_dequant_ops)
quant_dequant_pass.apply(main_graph)
out_scale_training_pass = OutScaleForTrainingPass(
scope=scope, place=place, moving_rate=config['moving_rate'])
out_scale_training_pass.apply(main_graph)
if (weight_preprocess_func is not None or
act_preprocess_func is not None) and not for_test:
_logger.info(
"When a preprocess_func is used in quant_aware, Need to save a mapping table to match variable names in the convert phase."
)
_logger.info("The mapping table is saved as '{}'.".format(
VARS_MAPPING_TABLE))
save_dict(main_graph.out_node_mapping_table)
if for_test or return_program:
quant_program = main_graph.to_program()
else:
quant_program = paddle.static.CompiledProgram(main_graph.graph)
return quant_program
def quant_post_static(
executor,
model_dir,
quantize_model_path,
batch_generator=None,
sample_generator=None,
model_filename=None,
params_filename=None,
save_model_filename='__model__',
save_params_filename='__params__',
batch_size=16,
batch_nums=None,
scope=None,
algo='hist',
hist_percent=0.9999,
bias_correction=False,
quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
is_full_quantize=False,
weight_bits=8,
activation_bits=8,
activation_quantize_type='range_abs_max',
weight_quantize_type='channel_wise_abs_max',
optimize_model=False,
is_use_cache_file=False,
cache_dir="./temp_post_training"):
"""
The function utilizes static post training quantization method to
quantize the fp32 model. It uses calibrate data to calculate the
scale factor of quantized variables, and inserts fake quantization
and dequantization operators to obtain the quantized model.
Args:
executor(paddle.static.Executor): The executor to load, run and save the
quantized model.
model_dir(str): The path of fp32 model that will be quantized, and
the model and params that saved by ``paddle.static.io.save_inference_model``
are under the path.
quantize_model_path(str): The path to save quantized model using api
``paddle.static.io.save_inference_model``.
batch_generator(Python Generator): The batch generator provides
calibrate data for DataLoader, and it returns a batch every
time. For sample_generator and batch_generator, only one
            can be set. Besides, batch_generator supports lod tensor.
sample_generator(Python Generator): The sample generator provides
calibrate data for DataLoader, and it only returns a sample every time.
model_filename(str, optional): The name of model file. If parameters
are saved in separate files, set it as 'None'. Default: 'None'.
params_filename(str, optional): The name of params file.
When all parameters are saved in a single file, set it
as filename. If parameters are saved in separate files,
set it as 'None'. Default : 'None'.
save_model_filename(str): The name of model file to save the quantized inference program. Default: '__model__'.
save_params_filename(str): The name of file to save all related parameters.
If it is set None, parameters will be saved in separate files. Default: '__params__'.
batch_size(int, optional): The batch size of DataLoader, default is 16.
batch_nums(int, optional): If batch_nums is not None, the number of calibrate
data is 'batch_size*batch_nums'. If batch_nums is None, use all data
generated by sample_generator as calibrate data.
scope(paddle.static.Scope, optional): The scope to run program, use it to load
and save variables. If scope is None, will use paddle.static.global_scope().
algo(str, optional): If algo='KL', use KL-divergenc method to
get the scale factor. If algo='hist', use the hist_percent of histogram
to get the scale factor. If algo='mse', search for the best scale factor which
            makes the mse loss minimal. Using one batch of data for mse is enough. If
algo='avg', use the average of abs_max values to get the scale factor. If
algo='abs_max', use abs_max method to get the scale factor. Default: 'hist'.
hist_percent(float, optional): The percentile of histogram for algo hist.Default:0.9999.
bias_correction(bool, optional): Bias correction method of https://arxiv.org/abs/1810.05723.
Default: False.
quantizable_op_type(list[str], optional): The list of op types
that will be quantized. Default: ["conv2d", "depthwise_conv2d",
"mul"].
weight_bits(int, optional): quantization bit number for weights.
activation_bits(int): quantization bit number for activation.
activation_quantize_type(str): quantization type for activation,
now support 'range_abs_max', 'moving_average_abs_max' and 'abs_max'.
This parameter only specifies the fake ops in quantized model.
If it is 'range_abs_max' or 'moving_average_abs_max', we save the scale
obtained by post training quantization in fake ops. If it
is 'abs_max', the scale will not be saved in fake ops.
weight_quantize_type(str): quantization type for weights,
support 'abs_max' and 'channel_wise_abs_max'. Compared to 'abs_max',
the model accuracy is usually higher when using 'channel_wise_abs_max'.
is_full_quantize(bool): if True, apply quantization to all supported quantizable op type.
If False, only apply quantization to the input quantizable_op_type. Default is False.
        optimize_model(bool, optional): If optimize_model is set to True, it applies some
            passes to optimize the model before quantization. So far, the place of the
            executor must be CPU, and it supports fusing batch_norm into convs.
is_use_cache_file(bool): This param is deprecated.
cache_dir(str): This param is deprecated.
Returns:
None
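    Examples:
        A minimal sketch (``reader`` and the model paths are placeholders,
        not part of this API):

        .. code-block:: python

            import paddle
            paddle.enable_static()
            exe = paddle.static.Executor(paddle.CPUPlace())
            quant_post_static(
                executor=exe,
                model_dir='./fp32_model',
                quantize_model_path='./quant_model',
                sample_generator=reader,
                batch_nums=10)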
"""
post_training_quantization = PostTrainingQuantization(
executor=executor,
sample_generator=sample_generator,
batch_generator=batch_generator,
model_dir=model_dir,
model_filename=model_filename,
params_filename=params_filename,
batch_size=batch_size,
batch_nums=batch_nums,
scope=scope,
algo=algo,
hist_percent=hist_percent,
bias_correction=bias_correction,
quantizable_op_type=quantizable_op_type,
is_full_quantize=is_full_quantize,
weight_bits=weight_bits,
activation_bits=activation_bits,
activation_quantize_type=activation_quantize_type,
weight_quantize_type=weight_quantize_type,
optimize_model=optimize_model)
post_training_quantization.quantize()
post_training_quantization.save_quantized_model(
quantize_model_path,
model_filename=save_model_filename,
params_filename=save_params_filename)
# We have changed the quant_post to quant_post_static.
# For compatibility, we keep quant_post api for now, and it will be
# deprecated in the future.
quant_post = quant_post_static
def convert(program, place, config=None, scope=None, save_int8=False):
"""
    Convert a quantized and well-trained ``program`` to a final quantized
    ``program`` that can be used to save an ``inference model``.
Args:
program(paddle.static.Program): quantized and well-trained ``test program``.
place(paddle.CPUPlace or paddle.CUDAPlace): This parameter represents
the executor run on which device.
config(dict, optional): configs for convert. if set None, will use
default config. It must be same with config that used in
'quant_aware'. Default is None.
scope(paddle.static.Scope, optional): Scope records the mapping between
variable names and variables, similar to brackets in
programming languages. Usually users can use
`paddle.static.global_scope <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_.
When ``None`` will use
`paddle.static.global_scope() <https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/api_cn/executor_cn/global_scope_cn.html>`_
. Default: ``None``.
save_int8: Whether to return ``program`` which model parameters'
dtype is ``int8``. This parameter can only be used to
get model size. Default: ``False``.
Returns:
        Tuple: the frozen program which can be used for inference.
        When ``save_int8`` is False, return ``freezed_program(paddle.static.Program)``.
        When ``save_int8`` is True, return ``freezed_program(paddle.static.Program)``
and ``freezed_program_int8(paddle.static.Program)``
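    Examples:
        A minimal sketch (``quant_test_program`` stands for the test program
        returned by ``quant_aware`` with ``for_test=True``):

        .. code-block:: python

            freezed_program = convert(
                quant_test_program, paddle.CPUPlace(), config=None)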
"""
scope = paddle.static.global_scope() if not scope else scope
if config is None:
config = _quant_config_default
else:
assert isinstance(config, dict), "config must be dict"
config = _parse_configs(config)
_logger.info("convert config {}".format(config))
test_graph = IrGraph(core.Graph(program.desc), for_test=True)
out_scale_infer_pass = OutScaleForInferencePass(scope=scope)
out_scale_infer_pass.apply(test_graph)
# Freeze the graph after training by adjusting the quantize
# operators' order for the inference.
freeze_pass = QuantizationFreezePass(
scope=scope,
place=place,
weight_bits=config['weight_bits'],
activation_bits=config['activation_bits'],
weight_quantize_type=config['weight_quantize_type'])
if os.path.exists(VARS_MAPPING_TABLE):
test_graph.out_node_mapping_table = load_dict()
freeze_pass.apply(test_graph)
freezed_program = test_graph.to_program()
if save_int8:
convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
convert_int8_pass.apply(test_graph)
freezed_program_int8 = test_graph.to_program()
return freezed_program, freezed_program_int8
else:
return freezed_program
def quant_post_dynamic(model_dir,
save_model_dir,
model_filename=None,
params_filename=None,
save_model_filename=None,
save_params_filename=None,
quantizable_op_type=["conv2d", "mul"],
weight_bits=8,
generate_test_model=False):
'''
    The function utilizes post training quantization to quantize the fp32
    model. In detail, it quantizes the weights of some ops from float32 to
    int8/16. For the quantized model, there are two kinds of calculation
    methods in the inference stage. Firstly, the quantized weight will be
    dequantized to float32, and then the float32 calculation is applied.
    Secondly, the quantized scales of the inputs are collected, and then the
    int8 calculation is applied.
Args:
model_dir(str): The path of the fp32 model that will be quantized,
and the model and params files are under the path.
save_model_dir(str): The path to save the quantized model.
model_filename(str, optional): The name of file used to load the
inference program. If it is None, the default filename
'__model__' will be used. Default is 'None'.
params_filename(str, optional): The name of file used to load all
parameters. When all parameters were saved in a single
binary file, set it as the real filename. If parameters
were saved in separate files, set it as 'None'. Default is
'None'.
save_model_filename(str, optional): The name of file to
save the inference program. If it is None, the default
filename '__model__' will be used. Default is 'None'.
save_params_filename(str, optional): The name of file to
            save all parameters. If it is None, parameters will be
            saved in separate files. If it is not None, all
            parameters will be saved in a single binary file.
quantizable_op_type(list[str], optional): The list of ops
that will be quantized, and the quantized ops should be
contained in ["conv2d", "depthwise_conv2d", "mul"].
Default is ["conv2d", "depthwise_conv2d", "mul"].
weight_bits(int, optional): The bits for the quantized weight,
and it should be 8 or 16. Default is 8.
        generate_test_model(bool, optional): If generate_test_model is set
            to True, it saves a fake quantized model, in which the weights
are quantized and dequantized. We can use PaddlePaddle to load
the fake quantized model and test the accuracy on GPU or CPU.
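    Examples:
        A minimal sketch (the paths are placeholders):

        .. code-block:: python

            quant_post_dynamic(
                model_dir='./fp32_model',
                save_model_dir='./quant_model',
                weight_bits=8)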
'''
weight_quant = WeightQuantization(
model_dir=model_dir,
model_filename=model_filename,
params_filename=params_filename)
weight_quant.quantize_weight_to_int(
save_model_dir=save_model_dir,
save_model_filename=save_model_filename,
save_params_filename=save_params_filename,
quantizable_op_type=quantizable_op_type,
weight_bits=weight_bits,
generate_test_model=generate_test_model)
# We have changed the quant_post_only_weight to quant_post_dynamic.
# For compatibility, we keep quant_post_only_weight api for now,
# and it will be deprecated in the future.
quant_post_only_weight = quant_post_dynamic
def pact(x, name=None):
helper = LayerHelper("pact", **locals())
dtype = 'float32'
init_thres = 20
u_param_attr = paddle.fluid.ParamAttr(
name=x.name + '_pact',
initializer=paddle.fluid.initializer.ConstantInitializer(
value=init_thres),
regularizer=paddle.fluid.regularizer.L2Decay(0.0001),
learning_rate=1)
u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
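    # PACT clips the activation to the learnable range [-u_param, u_param]:
    # x - relu(x - u_param) caps values above u_param at u_param, and adding
    # relu(-u_param - x) lifts values below -u_param back up to -u_param.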
x = paddle.fluid.layers.elementwise_sub(
x,
paddle.fluid.layers.relu(
paddle.fluid.layers.elementwise_sub(x, u_param)))
x = paddle.fluid.layers.elementwise_add(
x,
paddle.fluid.layers.relu(
paddle.fluid.layers.elementwise_sub(-u_param, x)))
return x
def get_pact_optimizer():
return paddle.fluid.optimizer.MomentumOptimizer(0.0001, 0.9)
|
import configparser
import os
import subprocess
import time
from distutils.util import strtobool
from pathlib import Path
from .util import import_string
from .docker import docker
from liquid_node.jobs import Job, liquid, hoover, dokuwiki, rocketchat, \
nextcloud, hypothesis, codimd, ci
def split_lang_codes(option):
option = option.strip()
if not option:
return []
return option.split(',')
class Configuration:
ALL_APPS = ('hoover', 'dokuwiki', 'rocketchat', 'nextcloud',
'hypothesis', 'codimd',)
# The core apps can't be turned off.
CORE_APPS = ('liquid', 'ingress',)
APP_TITLE = {
'dokuwiki': 'DokuWiki',
'rocketchat': "Rocket.Chat",
'codimd': "CodiMD",
}
APP_DESCRIPTION = {
'hoover': 'is a search app.',
'hypothesis': 'is an annotation system.',
'dokuwiki': 'is a wiki system used as a knowledge base for processed information.',
'codimd': 'is a real-time collaboration pad.',
'nextcloud': 'has a file share system and a contact list of users.',
'rocketchat': 'is the chat app.'
}
ALL_JOBS = [
liquid.Liquid(),
liquid.Ingress(),
liquid.CreateUser(),
hoover.Hoover(),
hoover.Deps(),
hoover.Workers(),
hoover.Proxy(),
hoover.Nginx(),
dokuwiki.Dokuwiki(),
dokuwiki.Proxy(),
rocketchat.Rocketchat(),
rocketchat.Deps(),
rocketchat.Migrate(),
rocketchat.Proxy(),
nextcloud.Nextcloud(),
nextcloud.Deps(),
nextcloud.Migrate(),
nextcloud.Periodic(),
nextcloud.Proxy(),
hypothesis.Hypothesis(),
hypothesis.Deps(),
hypothesis.UserSync(),
hypothesis.Proxy(),
codimd.Codimd(),
codimd.Deps(),
codimd.Proxy(),
ci.Drone(),
ci.Deps(),
ci.DroneWorkers(),
]
def __init__(self):
self.root = Path(__file__).parent.parent.resolve()
self.templates = self.root / 'templates'
self.ini = configparser.ConfigParser()
self.ini.read(self.root / 'liquid.ini')
self.versions_ini = configparser.ConfigParser()
if (self.root / 'versions.ini').is_file():
self.versions_ini.read(self.root / 'versions.ini')
self.version_track = self.ini.get('liquid', 'version_track', fallback='production')
self.track_ini = configparser.ConfigParser()
assert (self.root / (self.version_track + '-versions.ini')).is_file(), \
'invalid version_track'
self.track_ini.read(self.root / (self.version_track + '-versions.ini'))
self.cluster_root_path = self.ini.get('cluster', 'cluster_path', fallback=None)
self.consul_url = self.ini.get('cluster', 'consul_url', fallback='http://127.0.0.1:8500')
self.vault_url = self.ini.get('cluster', 'vault_url', fallback='http://127.0.0.1:8200')
self.vault_token = None
vault_secrets_path = self.ini.get('cluster', 'vault_secrets', fallback=None)
if vault_secrets_path:
secrets = configparser.ConfigParser()
secrets.read(self.root / vault_secrets_path)
self.vault_token = secrets.get('vault', 'root_token', fallback=None)
self.nomad_url = self.ini.get('cluster', 'nomad_url', fallback='http://127.0.0.1:4646')
self.liquid_domain = self.ini.get('liquid', 'domain', fallback='localhost')
default_title = ' '.join(map(str.capitalize, self.liquid_domain.split('.')))
self.liquid_title = self.ini.get('liquid', 'title', fallback=default_title)
self.liquid_debug = self.ini.getboolean('liquid', 'debug', fallback=False)
self.mount_local_repos = self.ini.getboolean('liquid', 'mount_local_repos', fallback=False)
hoover_repos_path = self.ini.get('liquid', 'hoover_repos_path',
fallback=str((self.root / 'repos' / 'hoover')))
self.hoover_repos_path = str(Path(hoover_repos_path).resolve())
li_repos_path = self.ini.get('liquid', 'liquidinvestigations_repos_path',
fallback=str((self.root / 'repos' / 'liquidinvestigations')))
self.liquidinvestigations_repos_path = str(Path(li_repos_path).resolve())
h_repos_path = self.ini.get('liquid', 'hypothesis_repos_path',
fallback=str((self.root / 'repos' / 'hypothesis')))
self.hypothesis_repos_path = str(Path(h_repos_path).resolve())
self.liquid_volumes = self.ini.get('liquid', 'volumes', fallback=None)
self.liquid_collections = self.ini.get('liquid', 'collections',
fallback=str(self.root / 'collections'))
self.liquid_http_port = self.ini.get('liquid', 'http_port', fallback='80')
self.https_enabled = 'https' in self.ini
if self.https_enabled:
self.liquid_http_protocol = 'https'
self.liquid_https_port = self.ini.get('https', 'https_port', fallback='443')
self.https_acme_email = self.ini.get('https', 'acme_email')
self.https_acme_caServer = self.ini.get(
'https',
'acme_caServer',
fallback="https://acme-staging-v02.api.letsencrypt.org/directory"
)
else:
self.liquid_http_protocol = self.ini.get('liquid', 'http_protocol_override', fallback='http')
self.liquid_core_url = f'{self.liquid_http_protocol}://{self.liquid_domain}'
self.auth_staff_only = self.ini.getboolean('liquid', 'auth_staff_only', fallback=False)
self.auth_auto_logout = self.ini.get('liquid', 'auth_auto_logout', fallback='2400h')
self.liquid_2fa = self.ini.getboolean('liquid', 'two_factor_auth', fallback=False)
self.elasticsearch_heap_size = self.ini.getint('liquid', 'elasticsearch_heap_size',
fallback=1024)
self.elasticsearch_memory_limit = self.ini.getint('liquid', 'elasticsearch_memory_limit',
fallback=1536)
self.elasticsearch_data_node_count = self.ini.getint('liquid', 'elasticsearch_data_node_count', fallback=0) # noqa: E501
self.tika_count = self.ini.getint('liquid', 'tika_count', fallback=1)
self.tika_memory_limit = self.ini.getint('liquid', 'tika_memory_limit', fallback=800)
self.hypothesis_memory_limit = \
self.ini.getint('liquid',
'hypothesis_memory_limit',
fallback=1024)
self.nextcloud_memory_limit = \
self.ini.getint('liquid',
'nextcloud_memory_limit',
fallback=512)
self.hoover_ratelimit_user = self.ini.get('liquid', 'hoover_ratelimit_user', fallback='100,13')
self.hoover_web_memory_limit = self.ini.getint('liquid',
'hoover_web_memory_limit', fallback=300)
self.hoover_web_count = self.ini.getint('liquid',
'hoover_web_count', fallback=1)
self.rocketchat_show_login_form = self.ini.getboolean('liquid', 'rocketchat_show_login_form', fallback=True) # noqa: E501
self.hoover_ui_override_server = self.ini.get('liquid', 'hoover_ui_override_server', fallback='')
self.hoover_es_max_concurrent_shard_requests = self.ini.getint(
'liquid', 'hoover_es_max_concurrent_shard_requests', fallback=''
)
self.hoover_ui_force_pull = self.ini.getboolean('liquid', 'hoover_ui_force_pull', fallback=False)
self.hoover_ui_agg_split = self.ini.getint('liquid', 'hoover_ui_agg_split', fallback=1)
self.hoover_ui_search_retry = self.ini.getint('liquid', 'hoover_ui_search_retry', fallback=1)
self.snoop_workers_enabled = self.ini.getboolean('snoop', 'enable_workers', fallback=True)
self.snoop_min_workers_per_node = self.ini.getint('snoop', 'min_workers_per_node', fallback=2)
self.snoop_max_workers_per_node = self.ini.getint('snoop', 'max_workers_per_node', fallback=4)
self.snoop_cpu_count_multiplier = self.ini.getfloat('snoop', 'worker_cpu_count_multiplier', fallback=0.85) # noqa: E501
self.snoop_rabbitmq_memory_limit = self.ini.getint('snoop', 'rabbitmq_memory_limit', fallback=700)
self.snoop_postgres_memory_limit = self.ini.getint('snoop', 'postgres_memory_limit', fallback=1400)
self.snoop_postgres_max_connections = self.ini.getint('snoop', 'postgres_max_connections', fallback=250) # noqa: E501
self.snoop_worker_memory_limit = 500 * (2 + self.snoop_min_workers_per_node)
self.snoop_worker_hard_memory_limit = 5000 * (2 + self.snoop_max_workers_per_node)
self.snoop_worker_cpu_limit = 1500 * self.snoop_min_workers_per_node
self.snoop_max_result_window = self.ini.getint('snoop', 'max_result_window', fallback=10000)
self.snoop_refresh_interval = self.ini.get('snoop', 'refresh_interval', fallback="6s")
self.snoop_pdf_preview_enabled = self.ini.getboolean('snoop', 'pdf_preview_enabled', fallback=False)
self.snoop_pdf_preview_count = self.ini.getint('snoop', 'pdf_preview_count', fallback=1)
self.snoop_pdf_preview_memory_limit = self.ini.getint('snoop', 'pdf_preview_memory_limit',
fallback=900)
self.snoop_thumbnail_generator_enabled = self.ini.getboolean('snoop', 'thumbnail_generator_enabled',
fallback=False)
self.snoop_thumbnail_generator_count = self.ini.getint('snoop', 'thumbnail_generator_count',
fallback=1)
self.snoop_thumbnail_generator_memory_limit = self.ini.getint('snoop',
'thumbnail_generator_memory_limit',
fallback=900)
self.snoop_image_classification_count = self.ini.getint('snoop', 'image_classification_count',
fallback=1)
self.snoop_image_classification_memory_limit = self.ini.getint('snoop',
'image_classification_memory_limit',
fallback=900)
self.snoop_image_classification_waitress_threads = \
self.ini.getint('snoop', 'image_classification_waitress_threads', fallback=30)
self.snoop_image_classification_object_detection_enabled = \
self.ini.getboolean('snoop', 'image_classification_object_detection_enabled', fallback=False)
self.snoop_image_classification_object_detection_model = \
self.ini.get('snoop', 'image_classification_object_detection_model', fallback='yolo')
self.snoop_image_classification_classify_images_enabled = \
self.ini.getboolean('snoop', 'image_classification_classify_images_enabled', fallback=False)
self.snoop_image_classification_classify_images_model = \
self.ini.get('snoop', 'image_classification_classify_images_model', fallback='mobilenet')
self.check_interval = self.ini.get('deploy', 'check_interval', fallback='24s')
self.check_timeout = self.ini.get('deploy', 'check_timeout', fallback='20s')
self.wait_max = self.ini.getfloat('deploy', 'wait_max_sec', fallback=300)
self.wait_interval = self.ini.getfloat('deploy', 'wait_interval', fallback=4)
self.wait_green_count = self.ini.getint('deploy', 'wait_green_count', fallback=6)
self.ci_enabled = 'ci' in self.ini
if self.ci_enabled:
self.ci_runner_capacity = self.ini.getint('ci', 'runner_capacity', fallback=4)
self.ci_docker_username = self.ini.get('ci', 'docker_username')
self.ci_docker_password = self.ini.get('ci', 'docker_password')
self.ci_github_client_id = self.ini.get('ci', 'github_client_id')
self.ci_github_client_secret = self.ini.get('ci', 'github_client_secret')
self.ci_github_user_filter = self.ini.get('ci', 'github_user_filter')
self.ci_target_hostname = self.ini.get('ci', 'target_hostname')
self.ci_target_username = self.ini.get('ci', 'target_username')
self.ci_target_password = self.ini.get('ci', 'target_password')
self.ci_target_port = self.ini.get('ci', 'target_port')
self.default_app_status = self.ini.get('apps', 'default_app_status', fallback='on')
self.all_jobs = list(self.ALL_JOBS)
self.enabled_jobs = [job for job in self.all_jobs if self.is_app_enabled(job.app)]
self.disabled_jobs = [job for job in self.all_jobs if not self.is_app_enabled(job.app)]
self.image_keys = set(self.track_ini['versions']) | \
(set(self.versions_ini['versions']) if 'versions' in self.versions_ini.sections() else set()) | \
(set(self.ini['versions']) if 'versions' in self.ini.sections() else set())
self.images = set(self._image(c) for c in self.image_keys)
self.snoop_collections = []
# load collections and extra jobs
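        # e.g. a hypothetical liquid.ini section:
        #   [collection:testdata]
        #   process = true
        #   ocr_languages = eng,ron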
for key in self.ini:
if ':' not in key:
continue
(cls, name) = key.split(':')
if cls == 'collection':
Configuration._validate_collection_name(name)
self.snoop_collections.append({
'name': name,
'process': self.ini.getboolean(key, 'process', fallback=False),
'sync': self.ini.getboolean(key, 'sync', fallback=False),
'ocr_languages': split_lang_codes(self.ini.get(key, 'ocr_languages', fallback='')),
'max_result_window': self.ini.getint(
key,
'max_result_window',
fallback=self.snoop_max_result_window
),
'refresh_interval': self.ini.getint(
key,
'refresh_interval',
fallback=self.snoop_refresh_interval),
})
elif cls == 'job':
self.enabled_jobs.append(self.load_job(name, self.ini[key]))
self.timestamp = int(time.time())
self.liquid_apps = []
for app in self.ALL_APPS:
self.liquid_apps.append({
'id': app,
'title': self.APP_TITLE.get(app) or app.title(),
'url': self.app_url(app),
'enabled': self.is_app_enabled(app),
'description': self.APP_DESCRIPTION[app],
'adminOnly': False,
'version': self.version(app),
})
self.liquid_version = self.get_node_version()
self.liquid_core_version = self.version('liquid-core')
self.liquid_apps.append({
'id': "nextcloud-admin",
'title': "Nextcloud Admin",
'url': self.app_url('nextcloud') + "/index.php/login?autologin=admin",
'enabled': self.is_app_enabled('nextcloud'),
'description': "will log you in as the Nextcloud admin user. "
"You may need to log out of Nextcloud first.",
'adminOnly': True,
'version': '',
})
def get_node_version(self):
try:
return subprocess.check_output(['git', 'describe', '--tags'], shell=False).decode().strip()
except subprocess.CalledProcessError:
return os.getenv('LIQUID_VERSION', 'unknown version')
def version(self, name):
def tag(name):
return self._image(name).split(':', 1)[1]
if name == 'hoover':
search = tag('hoover-search')
snoop = tag('hoover-snoop2')
ui = tag('hoover-ui')
return f'search: {search}, snoop: {snoop}, ui: {ui}'
if name == 'hypothesis':
h = tag('hypothesis-h')
client = tag('h-client')
return f'h: {h}, client: {client}'
if name in ['dokuwiki', 'nextcloud']:
return tag('liquid-' + name)
return tag(name)
def _image(self, name):
"""Returns the NAME:TAG for a docker image from versions.ini.
        Can be overridden in liquid.ini, using the same section name.
"""
for x in [self.ini, self.versions_ini, self.track_ini]:
val = x.get('versions', name, fallback=None)
if val:
return val.strip()
else:
raise RuntimeError('image does not exist: ' + name)
def image(self, name):
"""Returns the NAME@SHA1 for a docker image from the docker system."""
return docker.image_digest(self._image(name))
def load_job(self, name, job_config):
if 'template' in job_config:
job = Job()
job.name = name
job.template = self.root / job_config['template']
return job
if 'loader' in job_config:
job_loader = import_string(job_config['loader'])
return job_loader(name, job_config, self)
raise RuntimeError("A job needs `template` or `loader`")
def app_url(self, name):
return f'{self.liquid_http_protocol}://{name}.{self.liquid_domain}'
def is_app_enabled(self, app_name):
if app_name == 'hoover-workers':
return self.snoop_workers_enabled and self.is_app_enabled('hoover')
if app_name == 'ci':
return self.ci_enabled
return app_name in Configuration.CORE_APPS or \
self.ini.getboolean('apps', app_name, fallback=strtobool(self.default_app_status))
@classmethod
def _validate_collection_name(self, name):
if not name.islower():
raise ValueError(f'''Invalid collection name "{name}"!
Collection names must start with lower case letters and must contain only
lower case letters and digits.
''')
config = Configuration()
|
#!/usr/bin/env python
# Package: Tragit
# Author: Abhishek Shrivastava <i.abhi27[at]gmail[dot]com>.
# License: BSD License
# TODO: NEED TO FIND A WAY TO ACCESS GITHUB VIA API TOKEN WITHOUT PASSWORD
# GITHUBLOGIN = os.popen('git config --global github.user').read().strip()
# GITHUBTOKEN = os.popen('git config --global github.token').read().strip()
# TODO: NEED TO ALLOW FOR COMMENTS IMPORT TOO
import os,sys
import base64
from httplib import HTTPSConnection
from json import JSONDecoder, JSONEncoder
GITHUBAPI = 'api.github.com'
class Github(object):
def __init__(self, username, password, project, projectsource):
self._username = username
self._password = password
self._project = project
self._projectsource = projectsource
self.labels = []
self.milestones = {}
self.collaborators = {}
def get_collaborators(self):
if not self.collaborators:
x = GithubRequest(self._username, self._password)
data = x.request('GET','/repos/%s/%s/collaborators' % (self._projectsource, self._project))
if 'message' in data:
self._error(data)
return False
self.collaborators = [y['login'] for y in data]
return self.collaborators
def get_milestones(self):
if not self.milestones:
x = GithubRequest(self._username, self._password)
data = x.request('GET','/repos/%s/%s/milestones?state=open' % (self._projectsource, self._project))
if 'message' in data:
self._error(data)
return False
self.milestones = dict([(y['title'], y['number']) for y in data])
data = x.request('GET','/repos/%s/%s/milestones?state=closed' % (self._projectsource, self._project))
if 'message' in data:
self._error(data)
return False
self.milestones.update(dict([(y['title'], y['number']) for y in data]))
return self.milestones
def create_milestone(self, ms_title, ms_desc = None, ms_dueon = None, ms_state = None):
x = GithubRequest(self._username, self._password)
milestone = {}
milestone['title'] = ms_title
if ms_desc != None:
milestone['description'] = ms_desc
if ms_state != None:
milestone['state'] = ms_state
if ms_dueon != None:
milestone['due_on'] = ms_dueon
print "Creating milestone : "+str(milestone)
data = x.request('POST','/repos/%s/%s/milestones' % (self._projectsource, self._project), milestone)
if 'title' in data and data['title'] == ms_title:
self.milestones[ms_title] = data['number']
return data['number']
self._error(data)
return False
def get_labels(self):
if not self.labels:
x = GithubRequest(self._username, self._password)
data = x.request('GET','/repos/%s/%s/labels' % (self._projectsource, self._project))
if 'message' in data:
self._error(data)
return False
self.labels = [x['name'] for x in data]
return self.labels
def create_label(self, lab_name, lab_color = '0000DD'):
x = GithubRequest(self._username, self._password)
label = {}
label['name'] = lab_name
label['color'] = lab_color
print "Creating label : "+str(label)
data = x.request('POST','/repos/%s/%s/labels' % (self._projectsource, self._project), label)
if 'name' in data and data['name'] == lab_name:
self.labels.append(lab_name)
return True
self._error(data)
return False
def create_issue(self, iss_title, iss_body = None, iss_assignee = None, iss_milestone = None, iss_labels = None):
x = GithubRequest(self._username, self._password)
issue = {}
issue['title'] = iss_title
if iss_body != None:
issue['body'] = iss_body
if iss_assignee != None and iss_assignee != '':
issue['assignee'] = iss_assignee
if iss_milestone != None and type(iss_milestone) == type(1):
issue['milestone'] = iss_milestone
if iss_labels != None and type(iss_labels) == type([]):
issue['labels'] = iss_labels
data = x.request('POST','/repos/%s/%s/issues' % (self._projectsource, self._project), issue)
if 'title' in data and data['title'] == iss_title:
return data['number']
self._error(data)
return False
def close_issue(self, iss_id):
x = GithubRequest(self._username, self._password)
issue = {}
issue['state'] = 'closed'
data = x.request('PATCH','/repos/%s/%s/issues/%d' % (self._projectsource, self._project, iss_id), issue)
if 'state' in data and data['state'] == 'closed':
return True
self._error(data)
return False
def get_error(self):
return self._last_error_data
def _error(self, data):
self._last_error_data = data
print "----------------ERROR--------------"
print data
print "----------------ERROR--------------"
class GithubRequest(object):
def __init__(self, username, password):
self._username = username
self._password = password
self._create_connection()
self._create_auth_header()
self._decoder = JSONDecoder()
self._encoder = JSONEncoder()
def _create_auth_header(self):
userpass = '%s:%s' % (self._username, self._password)
authkey = base64.b64encode(userpass).replace('\n','')
self._auth_header = {}
self._auth_header['Authorization'] = 'Basic %s' % authkey
def _create_connection(self):
self._connection = HTTPSConnection(GITHUBAPI)
self._connection.connect()
def request(self, method, url, params = None):
if params != None:
jsondata = self._encoder.encode(params)
else:
jsondata = None
self._connection.request(method,url,jsondata,self._auth_header)
response = self._connection.getresponse()
jsonresponse = response.read()
textresponse = self._decoder.decode(jsonresponse)
return textresponse
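# A minimal usage sketch (credentials and repository names are placeholders):
#   gh = Github('username', 'password', 'myrepo', 'repo-owner')
#   gh.create_label('imported-from-trac')
#   issue_number = gh.create_issue('Example issue', iss_labels=['imported-from-trac'])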
|
from gismeteo.scraper import GisMeteoScraper
name = "gis_meteo_scraper"
|
"""Sample API Client."""
import aiohttp
import asyncio
from switchbot import SwitchBot
from .const import LOGGER
class SwitchBotCloudApiClient:
"""Class to integration with SwitchBot Python library."""
def __init__(self, session: aiohttp.ClientSession) -> None:
"""Sample API Client."""
self._loop = asyncio.get_event_loop()
self._switchbot = None
self._task = None
async def authenticate(self, username: str, password: str) -> None:
"""Authenticate."""
assert self._switchbot is None
self._switchbot = await self._loop.run_in_executor(None, SwitchBot, username)
await self._loop.run_in_executor(None, self._switchbot.authenticate, password)
def start(self, callback):
"""Start coroutine to check devices."""
assert self._switchbot is not None
if self._task:
return
async def list_devices():
"""Coroutine to periodically get devices."""
while True:
devices = await self._loop.run_in_executor(
None, getattr, self._switchbot, "devices"
)
if devices:
callback(devices)
await asyncio.sleep(60)
self._task = self._loop.create_task(list_devices())
def stop(self):
"""End coroutine."""
if not self._task:
return
self._task.cancel()
self._task = None
|
import os
import platform
import subprocess
from six import BytesIO
def rel_path_to_url(rel_path, url_base="/"):
assert not os.path.isabs(rel_path), rel_path
if url_base[0] != "/":
url_base = "/" + url_base
if url_base[-1] != "/":
url_base += "/"
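    # e.g. rel_path_to_url("dir/file.html", "base") -> "/base/dir/file.html"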
return url_base + rel_path.replace(os.sep, "/")
def from_os_path(path):
assert os.path.sep == "/" or platform.system() == "Windows"
if "/" == os.path.sep:
rv = path
else:
rv = path.replace(os.path.sep, "/")
if "\\" in rv:
raise ValueError("path contains \\ when separator is %s" % os.path.sep)
return rv
def to_os_path(path):
assert os.path.sep == "/" or platform.system() == "Windows"
if "\\" in path:
raise ValueError("normalised path contains \\")
if "/" == os.path.sep:
return path
return path.replace("/", os.path.sep)
def git(path):
def gitfunc(cmd, *args):
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT)
except Exception as e:
if platform.uname()[0] == "Windows" and isinstance(e, WindowsError):
full_cmd[0] = "git.bat"
return subprocess.check_output(full_cmd, cwd=path, stderr=subprocess.STDOUT)
else:
raise
try:
# this needs to be a command that fails if we aren't in a git repo
gitfunc("rev-parse", "--show-toplevel")
except (subprocess.CalledProcessError, OSError):
return None
else:
return gitfunc
class ContextManagerBytesIO(BytesIO):
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
class cached_property(object):
def __init__(self, func):
self.func = func
self.__doc__ = getattr(func, "__doc__")
self.name = func.__name__
def __get__(self, obj, cls=None):
if obj is None:
return self
if self.name not in obj.__dict__:
obj.__dict__[self.name] = self.func(obj)
obj.__dict__.setdefault("__cached_properties__", set()).add(self.name)
return obj.__dict__[self.name]
|
import pytest
from domestic.forms import (
MarketAccessAboutForm,
MarketAccessProblemDetailsForm,
MarketAccessSummaryForm,
)
pytestmark = pytest.mark.django_db
@pytest.fixture
def about_form_data():
business_type = 'I’m an exporter or investor, or ' 'I want to export or invest'
return {
'firstname': 'Craig',
'lastname': 'Smith',
'jobtitle': 'Musician',
'business_type': business_type,
'other_business_type': '',
'company_name': 'Craig Music',
'email': 'craig@craigmusic.com',
'phone': '0123456789',
}
@pytest.fixture
def about_form_data_with_other_business_type():
return {
'firstname': 'Craig',
'lastname': 'Smith',
'jobtitle': 'Musician',
'business_type': 'Other',
'other_business_type': 'Other business type',
'company_name': 'Craig Music',
'email': 'craig@craigmusic.com',
'phone': '0123456789',
}
def test_about_form_initial():
form = MarketAccessAboutForm()
assert form.fields['firstname'].initial is None
assert form.fields['lastname'].initial is None
assert form.fields['jobtitle'].initial is None
assert form.fields['business_type'].initial is None
assert form.fields['other_business_type'].initial is None
assert form.fields['company_name'].initial is None
assert form.fields['email'].initial is None
assert form.fields['phone'].initial is None
def test_about_form_mandatory_fields():
form = MarketAccessAboutForm(data={})
assert form.fields['firstname'].required is True
assert form.fields['lastname'].required is True
assert form.fields['jobtitle'].required is True
assert form.fields['business_type'].required is True
assert form.fields['other_business_type'].required is False
assert form.fields['company_name'].required is True
assert form.fields['email'].required is True
assert form.fields['phone'].required is True
def test_about_form_serialize(about_form_data):
form = MarketAccessAboutForm(data=about_form_data)
assert form.is_valid()
assert form.cleaned_data == about_form_data
def test_about_form_with_other_serializes(about_form_data_with_other_business_type):
form = MarketAccessAboutForm(data=about_form_data_with_other_business_type)
assert form.is_valid()
assert form.cleaned_data == about_form_data_with_other_business_type
def test_other_business_type_is_required_if_other_business_type(about_form_data_with_other_business_type):
about_form_data_with_other_business_type['other_business_type'] = ''
form = MarketAccessAboutForm(data=about_form_data_with_other_business_type)
assert len(form.errors) == 1
assert form.errors['other_business_type'] == ['Enter your organisation']
def test_about_form_error_messages():
form = MarketAccessAboutForm(data={})
assert len(form.errors) == 7
    assert form.errors['firstname'] == ['Enter your first name']
    assert form.errors['lastname'] == ['Enter your last name']
    assert form.errors['jobtitle'] == ['Enter your job title']
    assert form.errors['business_type'] == ['Enter your business type']
    assert form.errors['company_name'] == ['Enter your company name']
    assert form.errors['email'] == ['Enter your email']
    assert form.errors['phone'] == ['Enter your phone number']
@pytest.fixture
def problem_details_form_data():
return {
'product_service': 'something',
'location': 'AO',
'problem_summary': 'problem summary',
'impact': 'problem impact',
'resolve_summary': 'steps in resolving',
'problem_cause': ['brexit'],
}
def test_problem_details_form_initial():
form = MarketAccessProblemDetailsForm()
assert form.fields['product_service'].initial is None
assert form.fields['location'].initial is None
assert form.fields['problem_summary'].initial is None
assert form.fields['impact'].initial is None
assert form.fields['resolve_summary'].initial is None
assert form.fields['problem_cause'].initial is None
def test_problem_details_form_mandatory_fields():
form = MarketAccessProblemDetailsForm(data={})
assert form.fields['product_service'].required is True
assert form.fields['location'].required is True
assert form.fields['problem_summary'].required is True
assert form.fields['impact'].required is True
assert form.fields['resolve_summary'].required is True
assert form.fields['problem_cause'].required is False
def test_problem_details_form_serialize(problem_details_form_data):
form = MarketAccessProblemDetailsForm(data=problem_details_form_data)
assert form.is_valid()
assert form.cleaned_data == {
'location_label': 'Angola',
'problem_cause_label': ['Brexit'],
**problem_details_form_data,
}
def test_problem_details_error_messages():
form = MarketAccessProblemDetailsForm(data={})
assert len(form.errors) == 5
    assert form.errors['product_service'] == ['Tell us what you’re trying to export or invest in']
    assert form.errors['location'] == ['Tell us where you are trying to export to or invest in']
    assert form.errors['problem_summary'] == ['Tell us about the problem you’re facing']
    assert form.errors['impact'] == ['Tell us how your business is being affected by the problem']
    assert form.errors['resolve_summary'] == [
('Tell us what you’ve done to resolve your problem, ' 'even if this is your first step')
]
def test_summary_form():
form = MarketAccessSummaryForm(data={})
assert form.fields['contact_by_email'].required is False
assert form.fields['contact_by_phone'].required is False
|
__all__ = ('SquashFsImage', 'SquashedFile', 'SquashInode')
from .PySquashfsImage import SquashFsImage
from .PySquashfsImage import SquashedFile
from .PySquashfsImage import SquashInode
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="SubRenamerQt",
version="0.2.1-SNAPSHOT",
author="Zhang Zongyu",
author_email="zongyu@novazy.net",
description="Rename subtitle files intuitively",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/6-6-6/SubRenamerQt",
package_dir={'': 'src'},
packages=["SubRenamerQt"],
scripts=["bin/SubRenamerQt"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["PyQt5"],
python_requires='>=3.7',
)
|
class Solution(object):
def smallestDivisor(self, nums, threshold):
"""
:type nums: List[int]
:type threshold: int
:rtype: int
"""
        # Use ceil(n/d) = (n-1)//d + 1: subtract 1 from every element so the check
        # becomes sum((n-1)//d) <= threshold - len(nums).
        size, sum, num_max = len(nums), 0, 0
        for i in range(size):
            num_max = max(num_max, nums[i])
            nums[i] -= 1
            sum += nums[i]
        if threshold == size:
            return num_max
        # Binary-search bounds for the divisor.
        max_value = min(num_max, sum // (threshold - size) + 1)
        min_value = sum // threshold
while True:
mid_value = (max_value + min_value) // 2
if mid_value == min_value:
break
division = 0
for num in nums:
division += num // mid_value
if division <= threshold - size:
max_value = mid_value
else:
min_value = mid_value
return max_value
print(Solution().smallestDivisor([1,2,5,9], 6)) # 5
print(Solution().smallestDivisor([2,3,5,7,11], 11)) # 3
print(Solution().smallestDivisor([2,3,5,7,11], 5)) # 11
print(Solution().smallestDivisor([19], 5)) # 4
# Given an array of integers nums and an integer threshold, we will choose a positive integer divisor and divide all the array by it and sum the result of the division. Find the smallest divisor such that the result mentioned above is less than or equal to threshold.
# Each result of division is rounded to the nearest integer greater than or equal to that element. (For example: 7/3 = 3 and 10/2 = 5).
# It is guaranteed that there will be an answer.
#
# Example 1:
# Input: nums = [1,2,5,9], threshold = 6
# Output: 5
# Explanation: We can get a sum to 17 (1+2+5+9) if the divisor is 1.
# If the divisor is 4 we can get a sum to 7 (1+1+2+3) and if the divisor is 5 the sum will be 5 (1+1+1+2).
# Example 2:
# Input: nums = [2,3,5,7,11], threshold = 11
# Output: 3
# Example 3:
# Input: nums = [19], threshold = 5
# Output: 4
#
# Constraints:
# 1 <= nums.length <= 5 * 10^4
# 1 <= nums[i] <= 10^6
# nums.length <= threshold <= 10^6
# Source: LeetCode (leetcode-cn)
# Link: https://leetcode-cn.com/problems/find-the-smallest-divisor-given-a-threshold
# Copyright belongs to LeetCode-CN (领扣网络). Commercial reuse requires official authorization; non-commercial reuse must credit the source.
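# Added cross-check (not part of the original snippet): a brute-force version that
# evaluates the ceiling-division sum directly and scans divisors upward. It is only
# practical for small inputs, but it is a handy way to validate the binary-search
# solution above; the helper name is my own.
def smallest_divisor_bruteforce(nums, threshold):
    divisor = 1
    while True:
        total = sum((n + divisor - 1) // divisor for n in nums)  # ceil(n / divisor)
        if total <= threshold:
            return divisor
        divisor += 1
# print(smallest_divisor_bruteforce([1, 2, 5, 9], 6))       # 5
# print(smallest_divisor_bruteforce([2, 3, 5, 7, 11], 11))  # 3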
|
from trakt_sync.differ.handlers.collection import Collection
from trakt_sync.differ.handlers.list import List
from trakt_sync.differ.handlers.lists import Lists
from trakt_sync.differ.handlers.playback import Playback
from trakt_sync.differ.handlers.ratings import Ratings
from trakt_sync.differ.handlers.watched import Watched
from trakt_sync.differ.handlers.watchlist import Watchlist
__all__ = [
'Collection',
'List',
'Lists',
'Playback',
'Ratings',
'Watched',
'Watchlist'
]
|
from psyrun import Param
from psyrun.store.npz import NpzStore
pspace = Param(x=range(4))
store = NpzStore()
def execute(x):
return {'y': x ** 2}
|
def countVowels(word):
count = 0
vowels = 'AaEeIiOoUu'
for i in word:
if (i in vowels):
count +=1
return count
print(countVowels('water'))
|
"""project: Text classification that decodes integers pointing to words
to words pointing to integers and determines whether the review is
good or bad with a limit of 250 words max in a review."""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #gpu
#load in movie dataset
data = keras.datasets.imdb
#the dataset contains many words; keep only the 88,000 most frequent ones
#the reviews are integer encoded - each integer stands for a word
(train_data, train_labels), (test_data, test_labels) = data.load_data(num_words=88000)
word_index = data.get_word_index() #gives us a tuple
word_index = {k : (v + 3) for k, v in word_index.items()} #break the tuple into key and value pairings k = word, v = integer
#reserve the first few indices for special tokens
word_index["<PAD>"] = 0 #padding token used to bring every review to the same fixed length
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3
#swap values in the keys
#we currently have integer pointing to words, but we reverse to get the words pointing to the integers
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
#redefine testing data
train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding="post", maxlen=250)
test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding="post", maxlen=250)
def decode_review(text):
return " ".join([reverse_word_index.get(i, "?") for i in text]) #try to get index i, otherwise return a ?
#model
#final output expectation : whether the review is good or bad
#the single output neuron produces a value between 0 and 1, interpreted as the probability that the review is positive
model = keras.Sequential()
model.add(keras.layers.Embedding(88000, 16)) #groups words in a similar way
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation="relu"))
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.summary()
#define mode
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
#hold out part of the training data to monitor validation performance
x_val = train_data[:10000] #first 10,000 entries for validation, the rest for training
x_train = train_data[10000:]
y_val = train_labels[:10000]
y_train = train_labels[10000:]
#fit model
fitModel = model.fit(x_train, y_train, epochs=40, batch_size=512, validation_data=(x_val, y_val), verbose=1)
results = model.evaluate(test_data, test_labels)
print(results)
model.save("model.h5") #h5 is an extension for saving model in keras tf
#looks up mapping for all words and returns an encoded list
def review_encode(s):
encoded = [1] #<START> = 1, setting a starting tag
for word in s:
if word.lower() in word_index:
encoded.append(word_index[word.lower()])
else:
encoded.append(2)
return encoded
model = keras.models.load_model("model.h5")
#load in outside sample data file
with open("testmodel.txt", encoding="utf-8") as f:
for line in f.readlines():
nline = line.replace(",", "").replace(".", "").replace("(", "").replace(")", "").replace(":", "").replace("\"", "").strip().split(" ")
encode = review_encode(nline)
encode = keras.preprocessing.sequence.pad_sequences([encode], value=word_index["<PAD>"], padding="post", maxlen=250)
predict = model.predict(encode)
print(line) #original text
print(encode) #encoded review
print(predict[0]) #whether the model thinks the review is negative or positive
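# Added usage illustration (not part of the original script): decode_review maps an
# integer-encoded review back to words via reverse_word_index, which is a quick way to
# sanity-check the padding and the special tokens defined above.
sample_review = test_data[0]
print(decode_review(sample_review))
print("model score (closer to 1 means positive):", model.predict(np.array([sample_review]))[0])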
|
__author__ = 'samiths'
from math import pow, e, log as ln, log10
def annuity_future_value(p, r, n):
return p*((pow(1+r, n)-1)/r)
def annuity_fv_continuous_compounding(cf, r, t):
return cf*((pow(e, r*t)-1)/(pow(e, r)-1))
def annuity_present_value(p, r, n):
return p*((1-pow(1+r, n*-1))/r)
def annuity_fv_solve_for_n(fv, p, r):
return ln(1+((fv*r)/p))/ln(1+r)
def annuity_pv_solve_for_n(pv, p, r):
return ln(pow(1-((pv*r)/p), -1))/ln(1+r)
def annuity_payment_pv(pv, r, n):
return (r*pv)/(1-pow(1+r, n*-1))
def annuity_payment_fv(fv, r, n):
return (fv*r)/(pow(1+r, n)-1)
def annuity_pv_factor(r, n):
return (1-pow(1+r, n*-1))/r
def annuity_due_payment_fv(fv, r, n):
return fv*(r/(pow(1+r, n)-1))*(1/(1+r))
def annuity_due_payment_pv(pv, r, n):
return pv*(r/(1-pow(1+r, n*-1)))*(1/(1+r))
def doubling_time(r):
return log10(2)/(log10(1+r))
def doubling_time_continues_compounding(r):
return ln(2)/r
def future_value(c0, r, n):
return c0 * pow(1+r, n)
def fv_continues_compounding(pv, r, t):
return pv*pow(e, r*t)
def fv_factor(r, n):
return pow(1+r, n)
def present_value(c1, r, n):
return c1/pow(1+r, n)
def pv_continues_compounding(c, r, t):
return c/pow(e, r*t)
def pv_factor(r, n):
return 1/pow(1+r, n)
def weighted_avg(values):
"""
TODO: Handle error
"""
wavg = 0
for (val, weight) in values:
wavg += val*weight
pass
return wavg
def rule_of_72(r):
return 72/r
def perpetuity(d, r):
return d/r
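# Added usage sketch (not part of the original module): a quick worked example of the
# ordinary-annuity helpers above; the figures are illustrative only.
if __name__ == '__main__':
    # $1,000 paid at the end of each year for 10 years at 5% per year
    print(annuity_future_value(1000, 0.05, 10))   # ~12577.89
    print(annuity_present_value(1000, 0.05, 10))  # ~7721.73
    # years needed to double an investment at 5% per year
    print(doubling_time(0.05))                    # ~14.21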
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:\SOFTWARE\PythonProjects\youtubedl_GUI_pyqt\interface\main.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(986, 776)
MainWindow.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("C:/Users/Mateus/.designer/assets/ytdl.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setStyleSheet("QToolButton:hover,.QPushButton:hover{\n"
" background-color: rgb(0, 120, 215)\n"
"}\n"
"\n"
"QToolButton#CornerAbout:hover{\n"
" background-color: transparent\n"
"}\n"
"\n"
"QDockWidget {\n"
" background-color: rgb(205, 205, 205);\n"
" color: rgb(0, 0, 0)\n"
"}")
self.MainWidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MainWidget.sizePolicy().hasHeightForWidth())
self.MainWidget.setSizePolicy(sizePolicy)
self.MainWidget.setMinimumSize(QtCore.QSize(0, 0))
self.MainWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.MainWidget.setStyleSheet("")
self.MainWidget.setObjectName("MainWidget")
self.gridLayout = QtWidgets.QGridLayout(self.MainWidget)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.gridLayout.setObjectName("gridLayout")
self.ConfigGroup = QtWidgets.QGroupBox(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ConfigGroup.sizePolicy().hasHeightForWidth())
self.ConfigGroup.setSizePolicy(sizePolicy)
self.ConfigGroup.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.ConfigGroup.setObjectName("ConfigGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.ConfigGroup)
self.gridLayout_2.setContentsMargins(9, 9, 9, 9)
self.gridLayout_2.setObjectName("gridLayout_2")
self.ConfigOutput = QtWidgets.QWidget(self.ConfigGroup)
self.ConfigOutput.setObjectName("ConfigOutput")
self.gridLayout_3 = QtWidgets.QGridLayout(self.ConfigOutput)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.TemplateLabel = QtWidgets.QLabel(self.ConfigOutput)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TemplateLabel.sizePolicy().hasHeightForWidth())
self.TemplateLabel.setSizePolicy(sizePolicy)
self.TemplateLabel.setObjectName("TemplateLabel")
self.gridLayout_3.addWidget(self.TemplateLabel, 0, 0, 1, 1)
self.RangeLabel = QtWidgets.QLabel(self.ConfigOutput)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.RangeLabel.sizePolicy().hasHeightForWidth())
self.RangeLabel.setSizePolicy(sizePolicy)
self.RangeLabel.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.RangeLabel.setObjectName("RangeLabel")
self.gridLayout_3.addWidget(self.RangeLabel, 1, 0, 1, 1)
self.RangeInput = QtWidgets.QLineEdit(self.ConfigOutput)
self.RangeInput.setMinimumSize(QtCore.QSize(100, 0))
self.RangeInput.setMaximumSize(QtCore.QSize(100, 16777215))
self.RangeInput.setObjectName("RangeInput")
self.gridLayout_3.addWidget(self.RangeInput, 1, 1, 1, 2)
self.TemplateInput = QtWidgets.QComboBox(self.ConfigOutput)
self.TemplateInput.setEditable(True)
self.TemplateInput.setInsertPolicy(QtWidgets.QComboBox.InsertAtTop)
self.TemplateInput.setDuplicatesEnabled(True)
self.TemplateInput.setObjectName("TemplateInput")
self.gridLayout_3.addWidget(self.TemplateInput, 0, 1, 1, 2)
self.gridLayout_2.addWidget(self.ConfigOutput, 0, 2, 1, 1)
self.sep1 = QtWidgets.QFrame(self.ConfigGroup)
self.sep1.setFrameShape(QtWidgets.QFrame.VLine)
self.sep1.setFrameShadow(QtWidgets.QFrame.Sunken)
self.sep1.setObjectName("sep1")
self.gridLayout_2.addWidget(self.sep1, 0, 1, 1, 1)
self.MediaType = QtWidgets.QWidget(self.ConfigGroup)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MediaType.sizePolicy().hasHeightForWidth())
self.MediaType.setSizePolicy(sizePolicy)
self.MediaType.setObjectName("MediaType")
self.gridLayout_8 = QtWidgets.QGridLayout(self.MediaType)
self.gridLayout_8.setContentsMargins(0, 0, 0, 0)
self.gridLayout_8.setObjectName("gridLayout_8")
self.ExportLabel = QtWidgets.QLabel(self.MediaType)
self.ExportLabel.setObjectName("ExportLabel")
self.gridLayout_8.addWidget(self.ExportLabel, 0, 0, 1, 1)
self.VideoOption = QtWidgets.QRadioButton(self.MediaType)
self.VideoOption.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.VideoOption.setChecked(True)
self.VideoOption.setObjectName("VideoOption")
self.gridLayout_8.addWidget(self.VideoOption, 0, 1, 1, 1)
self.AudioOption = QtWidgets.QRadioButton(self.MediaType)
self.AudioOption.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.AudioOption.setObjectName("AudioOption")
self.gridLayout_8.addWidget(self.AudioOption, 1, 1, 1, 1)
self.gridLayout_2.addWidget(self.MediaType, 0, 0, 1, 1)
self.gridLayout.addWidget(self.ConfigGroup, 1, 0, 1, 1)
self.OutputGroup = QtWidgets.QGroupBox(self.MainWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.OutputGroup.sizePolicy().hasHeightForWidth())
self.OutputGroup.setSizePolicy(sizePolicy)
self.OutputGroup.setMaximumSize(QtCore.QSize(16777215, 70))
self.OutputGroup.setObjectName("OutputGroup")
self.gridLayout_4 = QtWidgets.QGridLayout(self.OutputGroup)
self.gridLayout_4.setObjectName("gridLayout_4")
self.DestinationInput = QtWidgets.QLineEdit(self.OutputGroup)
self.DestinationInput.setObjectName("DestinationInput")
self.gridLayout_4.addWidget(self.DestinationInput, 0, 2, 1, 1)
self.DestinationLabel = QtWidgets.QLabel(self.OutputGroup)
self.DestinationLabel.setObjectName("DestinationLabel")
self.gridLayout_4.addWidget(self.DestinationLabel, 0, 0, 1, 1)
self.DestinationButton = QtWidgets.QToolButton(self.OutputGroup)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.DestinationButton.sizePolicy().hasHeightForWidth())
self.DestinationButton.setSizePolicy(sizePolicy)
self.DestinationButton.setMinimumSize(QtCore.QSize(24, 26))
self.DestinationButton.setMaximumSize(QtCore.QSize(24, 25))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/folder_yellow.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DestinationButton.setIcon(icon1)
self.DestinationButton.setObjectName("DestinationButton")
self.gridLayout_4.addWidget(self.DestinationButton, 0, 1, 1, 1)
self.gridLayout.addWidget(self.OutputGroup, 2, 0, 1, 1)
self.InputGroup = QtWidgets.QGroupBox(self.MainWidget)
self.InputGroup.setMaximumSize(QtCore.QSize(16777215, 70))
self.InputGroup.setObjectName("InputGroup")
self.gridLayout_5 = QtWidgets.QGridLayout(self.InputGroup)
self.gridLayout_5.setContentsMargins(-1, 11, -1, -1)
self.gridLayout_5.setObjectName("gridLayout_5")
self.UrlLabel = QtWidgets.QLabel(self.InputGroup)
self.UrlLabel.setObjectName("UrlLabel")
self.gridLayout_5.addWidget(self.UrlLabel, 0, 0, 1, 1)
self.UrlTextBox = QtWidgets.QLineEdit(self.InputGroup)
self.UrlTextBox.setObjectName("UrlTextBox")
self.gridLayout_5.addWidget(self.UrlTextBox, 0, 1, 1, 1)
self.gridLayout.addWidget(self.InputGroup, 0, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
self.gridLayout.addItem(spacerItem, 3, 0, 1, 1)
self.DownloadGroupBox = QtWidgets.QGroupBox(self.MainWidget)
self.DownloadGroupBox.setMinimumSize(QtCore.QSize(0, 97))
self.DownloadGroupBox.setMaximumSize(QtCore.QSize(16777215, 80))
self.DownloadGroupBox.setStyleSheet("alternate-background-color: rgb(255, 255, 0);")
self.DownloadGroupBox.setFlat(False)
self.DownloadGroupBox.setObjectName("DownloadGroupBox")
self.gridLayout_6 = QtWidgets.QGridLayout(self.DownloadGroupBox)
self.gridLayout_6.setObjectName("gridLayout_6")
self.DownloadProgress = QtWidgets.QProgressBar(self.DownloadGroupBox)
self.DownloadProgress.setProperty("value", 0)
self.DownloadProgress.setObjectName("DownloadProgress")
self.gridLayout_6.addWidget(self.DownloadProgress, 1, 1, 1, 1)
self.DownloadInfo = QtWidgets.QWidget(self.DownloadGroupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.DownloadInfo.sizePolicy().hasHeightForWidth())
self.DownloadInfo.setSizePolicy(sizePolicy)
self.DownloadInfo.setObjectName("DownloadInfo")
self.gridLayout_7 = QtWidgets.QGridLayout(self.DownloadInfo)
self.gridLayout_7.setContentsMargins(0, 0, 10, 0)
self.gridLayout_7.setObjectName("gridLayout_7")
self.sep2 = QtWidgets.QFrame(self.DownloadInfo)
self.sep2.setFrameShape(QtWidgets.QFrame.VLine)
self.sep2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.sep2.setObjectName("sep2")
self.gridLayout_7.addWidget(self.sep2, 0, 2, 1, 1)
self.SpeedLabel = QtWidgets.QLabel(self.DownloadInfo)
self.SpeedLabel.setMinimumSize(QtCore.QSize(90, 0))
self.SpeedLabel.setObjectName("SpeedLabel")
self.gridLayout_7.addWidget(self.SpeedLabel, 0, 3, 1, 1)
self.sep3 = QtWidgets.QFrame(self.DownloadInfo)
self.sep3.setFrameShape(QtWidgets.QFrame.VLine)
self.sep3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.sep3.setObjectName("sep3")
self.gridLayout_7.addWidget(self.sep3, 0, 5, 1, 1)
self.ETALabel = QtWidgets.QLabel(self.DownloadInfo)
self.ETALabel.setMinimumSize(QtCore.QSize(90, 0))
self.ETALabel.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.ETALabel.setObjectName("ETALabel")
self.gridLayout_7.addWidget(self.ETALabel, 0, 0, 1, 1)
self.FileSizeLabel = QtWidgets.QLabel(self.DownloadInfo)
self.FileSizeLabel.setMinimumSize(QtCore.QSize(90, 0))
self.FileSizeLabel.setObjectName("FileSizeLabel")
self.gridLayout_7.addWidget(self.FileSizeLabel, 0, 6, 1, 1)
self.gridLayout_6.addWidget(self.DownloadInfo, 2, 1, 1, 1)
self.DownloadButton = QtWidgets.QCommandLinkButton(self.DownloadGroupBox)
self.DownloadButton.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.DownloadButton.sizePolicy().hasHeightForWidth())
self.DownloadButton.setSizePolicy(sizePolicy)
self.DownloadButton.setMinimumSize(QtCore.QSize(0, 0))
self.DownloadButton.setMaximumSize(QtCore.QSize(16777215, 60))
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.DownloadButton.setFont(font)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/forward.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DownloadButton.setIcon(icon2)
self.DownloadButton.setAutoRepeat(False)
self.DownloadButton.setAutoExclusive(False)
self.DownloadButton.setAutoDefault(False)
self.DownloadButton.setDefault(False)
self.DownloadButton.setObjectName("DownloadButton")
self.gridLayout_6.addWidget(self.DownloadButton, 1, 0, 2, 1)
self.gridLayout.addWidget(self.DownloadGroupBox, 4, 0, 1, 1)
MainWindow.setCentralWidget(self.MainWidget)
self.MenuBar = QtWidgets.QMenuBar(MainWindow)
self.MenuBar.setGeometry(QtCore.QRect(0, 0, 986, 21))
self.MenuBar.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.MenuBar.setObjectName("MenuBar")
self.Preferences = QtWidgets.QMenu(self.MenuBar)
self.Preferences.setObjectName("Preferences")
self.ViewMenu = QtWidgets.QMenu(self.MenuBar)
self.ViewMenu.setObjectName("ViewMenu")
self.Theme = QtWidgets.QMenu(self.ViewMenu)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/theme.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Theme.setIcon(icon3)
self.Theme.setObjectName("Theme")
self.Help = QtWidgets.QMenu(self.MenuBar)
self.Help.setObjectName("Help")
self.CommandHelpMenu = QtWidgets.QMenu(self.Help)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/help_index.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.CommandHelpMenu.setIcon(icon4)
self.CommandHelpMenu.setObjectName("CommandHelpMenu")
self.menuTools = QtWidgets.QMenu(self.MenuBar)
self.menuTools.setObjectName("menuTools")
MainWindow.setMenuBar(self.MenuBar)
self.StatusBar = QtWidgets.QStatusBar(MainWindow)
self.StatusBar.setObjectName("StatusBar")
MainWindow.setStatusBar(self.StatusBar)
self.ConsoleDock = QtWidgets.QDockWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.ConsoleDock.sizePolicy().hasHeightForWidth())
self.ConsoleDock.setSizePolicy(sizePolicy)
self.ConsoleDock.setFloating(False)
self.ConsoleDock.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea|QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
self.ConsoleDock.setObjectName("ConsoleDock")
self.ConsoleWidget = QtWidgets.QWidget()
self.ConsoleWidget.setObjectName("ConsoleWidget")
self.gridLayout_9 = QtWidgets.QGridLayout(self.ConsoleWidget)
self.gridLayout_9.setContentsMargins(0, 0, 0, 0)
self.gridLayout_9.setObjectName("gridLayout_9")
self.ConsoleTextBox = QtWidgets.QPlainTextEdit(self.ConsoleWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.ConsoleTextBox.sizePolicy().hasHeightForWidth())
self.ConsoleTextBox.setSizePolicy(sizePolicy)
self.ConsoleTextBox.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.ConsoleTextBox.setStyleSheet("background-color: rgb(12, 12, 12);\n"
"color: rgb(204, 204, 204);\n"
"selection-color: rgb(12, 12, 12);\n"
"selection-background-color: rgb(204, 204, 204);\n"
"font: 9pt \"Consolas\";")
self.ConsoleTextBox.setFrameShape(QtWidgets.QFrame.NoFrame)
self.ConsoleTextBox.setReadOnly(True)
self.ConsoleTextBox.setObjectName("ConsoleTextBox")
self.gridLayout_9.addWidget(self.ConsoleTextBox, 0, 0, 1, 1)
self.ConsoleDock.setWidget(self.ConsoleWidget)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(8), self.ConsoleDock)
self.DwItems = QtWidgets.QDockWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.DwItems.sizePolicy().hasHeightForWidth())
self.DwItems.setSizePolicy(sizePolicy)
self.DwItems.setMinimumSize(QtCore.QSize(350, 91))
self.DwItems.setFeatures(QtWidgets.QDockWidget.AllDockWidgetFeatures)
self.DwItems.setObjectName("DwItems")
self.DwItemsListWidget = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(2)
sizePolicy.setHeightForWidth(self.DwItemsListWidget.sizePolicy().hasHeightForWidth())
self.DwItemsListWidget.setSizePolicy(sizePolicy)
self.DwItemsListWidget.setObjectName("DwItemsListWidget")
self.gridLayout_10 = QtWidgets.QGridLayout(self.DwItemsListWidget)
self.gridLayout_10.setContentsMargins(0, 0, 0, 0)
self.gridLayout_10.setObjectName("gridLayout_10")
self.DwItemsList = QtWidgets.QTableWidget(self.DwItemsListWidget)
self.DwItemsList.setFrameShape(QtWidgets.QFrame.NoFrame)
self.DwItemsList.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.DwItemsList.setObjectName("DwItemsList")
self.DwItemsList.setColumnCount(4)
self.DwItemsList.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.DwItemsList.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.DwItemsList.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.DwItemsList.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.DwItemsList.setHorizontalHeaderItem(3, item)
self.DwItemsList.horizontalHeader().setDefaultSectionSize(114)
self.gridLayout_10.addWidget(self.DwItemsList, 0, 0, 1, 1)
self.DwItems.setWidget(self.DwItemsListWidget)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.DwItems)
self.DwGraphDock = QtWidgets.QDockWidget(MainWindow)
self.DwGraphDock.setMinimumSize(QtCore.QSize(76, 140))
self.DwGraphDock.setObjectName("DwGraphDock")
self.DwGraphDockWidget = QtWidgets.QWidget()
self.DwGraphDockWidget.setObjectName("DwGraphDockWidget")
self.gridLayout_11 = QtWidgets.QGridLayout(self.DwGraphDockWidget)
self.gridLayout_11.setContentsMargins(0, 0, 0, 0)
self.gridLayout_11.setObjectName("gridLayout_11")
self.DwGraph = PlotWidget(self.DwGraphDockWidget)
self.DwGraph.setStyleSheet("background-color: rgb(0, 0, 0);")
self.DwGraph.setObjectName("DwGraph")
self.gridLayout_11.addWidget(self.DwGraph, 0, 0, 1, 1)
self.DwGraphDock.setWidget(self.DwGraphDockWidget)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(4), self.DwGraphDock)
self.About = QtWidgets.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/info.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.About.setIcon(icon5)
self.About.setObjectName("About")
self.AdditionalSwitches = QtWidgets.QAction(MainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/switch-plus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.AdditionalSwitches.setIcon(icon6)
self.AdditionalSwitches.setObjectName("AdditionalSwitches")
self.ConsoleOption = QtWidgets.QAction(MainWindow)
self.ConsoleOption.setCheckable(True)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/terminal.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.ConsoleOption.setIcon(icon7)
self.ConsoleOption.setObjectName("ConsoleOption")
self.youtube_dlHelp = QtWidgets.QAction(MainWindow)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/ytdl.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.youtube_dlHelp.setIcon(icon8)
self.youtube_dlHelp.setObjectName("youtube_dlHelp")
self.ffmpegHelp = QtWidgets.QAction(MainWindow)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/ffmpeg.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.ffmpegHelp.setIcon(icon9)
self.ffmpegHelp.setObjectName("ffmpegHelp")
self.Support = QtWidgets.QAction(MainWindow)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/helpcenter.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Support.setIcon(icon10)
self.Support.setObjectName("Support")
self.LightOption = QtWidgets.QAction(MainWindow)
self.LightOption.setCheckable(True)
self.LightOption.setChecked(True)
self.LightOption.setObjectName("LightOption")
self.DarkOption = QtWidgets.QAction(MainWindow)
self.DarkOption.setCheckable(True)
self.DarkOption.setObjectName("DarkOption")
self.DownloadedItems = QtWidgets.QAction(MainWindow)
self.DownloadedItems.setCheckable(True)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/view_text.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DownloadedItems.setIcon(icon11)
self.DownloadedItems.setObjectName("DownloadedItems")
self.DownloadGraph = QtWidgets.QAction(MainWindow)
self.DownloadGraph.setCheckable(True)
self.DownloadGraph.setEnabled(False)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/chart.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DownloadGraph.setIcon(icon12)
self.DownloadGraph.setObjectName("DownloadGraph")
self.ProxySettings = QtWidgets.QAction(MainWindow)
self.ProxySettings.setCheckable(True)
self.ProxySettings.setEnabled(True)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/proxy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.ProxySettings.setIcon(icon13)
self.ProxySettings.setObjectName("ProxySettings")
self.PreferNotif = QtWidgets.QAction(MainWindow)
self.PreferNotif.setCheckable(True)
icon14 = QtGui.QIcon()
icon14.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/bell.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.PreferNotif.setIcon(icon14)
self.PreferNotif.setObjectName("PreferNotif")
self.AFOperation = QtWidgets.QAction(MainWindow)
icon15 = QtGui.QIcon()
icon15.addPixmap(QtGui.QPixmap("e:\\SOFTWARE\\PythonProjects\\youtubedl_GUI_pyqt\\interface\\../assets/playlist.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.AFOperation.setIcon(icon15)
self.AFOperation.setObjectName("AFOperation")
self.Preferences.addAction(self.PreferNotif)
self.Preferences.addAction(self.ProxySettings)
self.Preferences.addSeparator()
self.Preferences.addAction(self.AdditionalSwitches)
self.Theme.addAction(self.LightOption)
self.Theme.addAction(self.DarkOption)
self.ViewMenu.addAction(self.ConsoleOption)
self.ViewMenu.addAction(self.DownloadedItems)
self.ViewMenu.addAction(self.DownloadGraph)
self.ViewMenu.addSeparator()
self.ViewMenu.addAction(self.Theme.menuAction())
self.CommandHelpMenu.addAction(self.youtube_dlHelp)
self.CommandHelpMenu.addAction(self.ffmpegHelp)
self.Help.addAction(self.CommandHelpMenu.menuAction())
self.Help.addAction(self.Support)
self.Help.addSeparator()
self.Help.addAction(self.About)
self.menuTools.addAction(self.AFOperation)
self.MenuBar.addAction(self.Preferences.menuAction())
self.MenuBar.addAction(self.ViewMenu.menuAction())
self.MenuBar.addAction(self.menuTools.menuAction())
self.MenuBar.addAction(self.Help.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.UrlTextBox, self.VideoOption)
MainWindow.setTabOrder(self.VideoOption, self.AudioOption)
MainWindow.setTabOrder(self.AudioOption, self.TemplateInput)
MainWindow.setTabOrder(self.TemplateInput, self.RangeInput)
MainWindow.setTabOrder(self.RangeInput, self.DestinationButton)
MainWindow.setTabOrder(self.DestinationButton, self.DestinationInput)
MainWindow.setTabOrder(self.DestinationInput, self.DownloadButton)
MainWindow.setTabOrder(self.DownloadButton, self.ConsoleTextBox)
MainWindow.setTabOrder(self.ConsoleTextBox, self.DwItemsList)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "youtube-dl GUI"))
self.ConfigGroup.setTitle(_translate("MainWindow", "Configuration"))
self.TemplateLabel.setText(_translate("MainWindow", "Output template"))
self.RangeLabel.setText(_translate("MainWindow", "Download range"))
self.RangeInput.setToolTip(_translate("MainWindow", "<html><head/><body><p>Select the range for download. </p><p>- <span style=\" font-weight:600;\">\'\' </span>Leaving it empty downloads all items.</p><p>- <span style=\" font-weight:600;\">m-n </span>Download from specified range.</p><p><span style=\" font-weight:600;\">- 1,2,..n </span>Download selected items.</p><p>- <span style=\" font-weight:600;\">x,m-n,y,z</span> Download specific items and from range.</p></body></html>"))
self.TemplateInput.setToolTip(_translate("MainWindow", "<html><head/><body><p>Append information to the end of your chosen output.</p><p>- <span style=\" font-weight:600;\">%(title)s</span> - video title</p><p>- <span style=\" font-weight:600;\">%(alt_title)s</span> - alternative video title</p><p>- <span style=\" font-weight:600;\">%(id)s</span> - video id</p><p>- <span style=\" font-weight:600;\">%(creator)s</span> - video creator</p><p>- <span style=\" font-weight:600;\">%(playlist_title)s</span> - playlist title</p><p>- <span style=\" font-weight:600;\">%(playlist_index)s</span> - video position in the playlist</p><p>Need the full list? Look for youtube-dl\'s README.md file at GitHub.</p></body></html>"))
self.ExportLabel.setText(_translate("MainWindow", "Export as..."))
self.VideoOption.setToolTip(_translate("MainWindow", "Download video and audio"))
self.VideoOption.setText(_translate("MainWindow", "Video + Audio"))
self.AudioOption.setToolTip(_translate("MainWindow", "Download audio only, ideal for music or sound effects"))
self.AudioOption.setText(_translate("MainWindow", "Audio-only"))
self.OutputGroup.setTitle(_translate("MainWindow", "Output"))
self.DestinationInput.setToolTip(_translate("MainWindow", "The path to your final file"))
self.DestinationLabel.setText(_translate("MainWindow", "Destination"))
self.DestinationButton.setToolTip(_translate("MainWindow", "Select the file\'s destination"))
self.DestinationButton.setText(_translate("MainWindow", "..."))
self.InputGroup.setTitle(_translate("MainWindow", "Input"))
self.UrlLabel.setText(_translate("MainWindow", "YouTube URL"))
self.UrlTextBox.setToolTip(_translate("MainWindow", "Input URL for your youtube video, playlist, or channel"))
self.DownloadGroupBox.setTitle(_translate("MainWindow", "Download information"))
self.SpeedLabel.setText(_translate("MainWindow", "Speed:"))
self.ETALabel.setText(_translate("MainWindow", "ETA:"))
self.FileSizeLabel.setText(_translate("MainWindow", "File size:"))
self.DownloadButton.setToolTip(_translate("MainWindow", "Finally, download!"))
self.DownloadButton.setText(_translate("MainWindow", "Start\n"
"download!"))
self.Preferences.setTitle(_translate("MainWindow", "Preferences"))
self.ViewMenu.setTitle(_translate("MainWindow", "View"))
self.Theme.setTitle(_translate("MainWindow", "Theme"))
self.Help.setTitle(_translate("MainWindow", "Help"))
self.CommandHelpMenu.setTitle(_translate("MainWindow", "Cmd line help"))
self.menuTools.setTitle(_translate("MainWindow", "Tools"))
self.ConsoleDock.setWindowTitle(_translate("MainWindow", "Console output"))
self.DwItems.setWindowTitle(_translate("MainWindow", "Downloaded files"))
item = self.DwItemsList.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Started"))
item = self.DwItemsList.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "File"))
item = self.DwItemsList.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Size"))
item = self.DwItemsList.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Total time"))
self.DwGraphDock.setWindowTitle(_translate("MainWindow", "Download graph"))
self.About.setText(_translate("MainWindow", "About"))
self.About.setShortcut(_translate("MainWindow", "F1"))
self.AdditionalSwitches.setText(_translate("MainWindow", "Additional switches"))
self.AdditionalSwitches.setToolTip(_translate("MainWindow", "Additional switches"))
self.ConsoleOption.setText(_translate("MainWindow", "Console output"))
self.ConsoleOption.setToolTip(_translate("MainWindow", "Console output"))
self.ConsoleOption.setShortcut(_translate("MainWindow", "F12"))
self.youtube_dlHelp.setText(_translate("MainWindow", "youtube-dl"))
self.youtube_dlHelp.setShortcut(_translate("MainWindow", "Shift+F1"))
self.ffmpegHelp.setText(_translate("MainWindow", "ffmpeg"))
self.ffmpegHelp.setShortcut(_translate("MainWindow", "Alt+Shift+F1"))
self.Support.setText(_translate("MainWindow", "Support"))
self.Support.setShortcut(_translate("MainWindow", "Alt+F1"))
self.LightOption.setText(_translate("MainWindow", "Light"))
self.DarkOption.setText(_translate("MainWindow", "Dark"))
self.DownloadedItems.setText(_translate("MainWindow", "Downloaded items"))
self.DownloadedItems.setShortcut(_translate("MainWindow", "F10"))
self.DownloadGraph.setText(_translate("MainWindow", "Download graph"))
self.DownloadGraph.setShortcut(_translate("MainWindow", "Ctrl+Shift+I"))
self.ProxySettings.setText(_translate("MainWindow", "Proxy settings"))
self.PreferNotif.setText(_translate("MainWindow", "Prefer notifications over message boxes"))
self.AFOperation.setText(_translate("MainWindow", "Album folder organizer"))
from pyqtgraph import PlotWidget
|
# Generated by Django 3.1.4 on 2021-04-20 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('finance_api', '0007_auto_20210304_1355'),
]
operations = [
migrations.CreateModel(
name='WallStreetBetsTickerMentions',
fields=[
('day', models.DateField(primary_key=True, serialize=False, unique=True)),
('tickers', models.TextField()),
],
options={
'ordering': ['day'],
},
),
]
|
#!/usr/bin/env python3
"""Simple demo on how to use the TriFingerPlatform interface."""
import argparse
import time
import cv2
import numpy as np
from trifinger_simulation import trifinger_platform, sample
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--enable-cameras",
"-c",
action="store_true",
help="Enable camera observations.",
)
parser.add_argument(
"--iterations",
type=int,
default=100,
help="Number of motions that are performed.",
)
parser.add_argument(
"--save-action-log",
type=str,
metavar="FILENAME",
help="If set, save the action log to the specified file.",
)
args = parser.parse_args()
platform = trifinger_platform.TriFingerPlatform(
visualization=True, enable_cameras=args.enable_cameras
)
# Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
for _ in range(args.iterations):
goal = np.array(
sample.random_joint_positions(
number_of_fingers=3,
lower_bounds=[-1, -1, -2],
upper_bounds=[1, 1, 2],
)
)
finger_action = platform.Action(position=goal)
# apply action for a few steps, so the fingers can move to the target
# position and stay there for a while
for _ in range(250):
t = platform.append_desired_action(finger_action)
time.sleep(platform.get_time_step())
# show the latest observations
robot_observation = platform.get_robot_observation(t)
print("Finger0 Position: %s" % robot_observation.position[:3])
camera_observation = platform.get_camera_observation(t)
print("Cube Position: %s" % camera_observation.object_pose.position)
if platform.enable_cameras:
for i, name in enumerate(("camera60", "camera180", "camera300")):
# simulation provides images in RGB but OpenCV expects BGR
img = cv2.cvtColor(
camera_observation.cameras[i].image, cv2.COLOR_RGB2BGR
)
cv2.imshow(name, img)
cv2.waitKey(1)
print()
if args.save_action_log:
platform.store_action_log(args.save_action_log)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# visualize-examples.py
import concepts.visualize
DIRECTORY = 'visualize-output'
FORMAT = 'pdf'
concepts.visualize.render_all('examples/*.cxt', directory=DIRECTORY, out_format=FORMAT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
'''
import codecs
from setuptools import setup
__author__ = 'Mircea Ulinic <ping@mirceaulinic.net>'
with codecs.open('README.rst', 'r', encoding='utf8') as file:
long_description = file.read()
setup(
name='alerta-blackout-regex',
version='2.0.2',
author='Mircea Ulinic',
author_email='ping@mirceaulinic.net',
py_modules=['blackout_regex'],
description='Alerta Blackout enhancement plugin',
long_description=long_description,
include_package_data=True,
zip_safe=True,
url='https://github.com/mirceaulinic/alerta-blackout-regex',
license="Apache License 2.0",
entry_points={'alerta.plugins': ['blackout_regex = blackout_regex:BlackoutRegex']},
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-15 15:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PartyList', '0006_change_on_delete'),
]
operations = [
migrations.AddField(
model_name='party',
name='girls_ever_signed_in',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='party',
name='guys_ever_signed_in',
field=models.IntegerField(default=0),
),
]
|
import cv2
img = cv2.imread('fotos-teste/img1.jpg')
classifier = "haarcascade_frontalface_alt2.xml"
# load the chosen Haar cascade classifier algorithm
loadAlg = cv2.CascadeClassifier(cv2.data.haarcascades + classifier)
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while not cv2.waitKey(20) & 0xFF == ord('q'):
ret, frame_color = capture.read()
gray = cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = loadAlg.detectMultiScale(gray)
for x, y, w, h in faces:
cv2.rectangle(frame_color,(x,y), (x+w, y+h), (0,255,0),2)
cv2.imshow('color', frame_color)
# cv2.imshow('img', img)
# cv2.imshow('gray', gray)
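# Added cleanup (not part of the original snippet): release the webcam and close the
# preview window once the loop exits on 'q'.
capture.release()
cv2.destroyAllWindows()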
|
from models.jira_config import Jira_Config
from jira.jira_builder import query_issues_for_sprint, query_specific_issue, query_active_sprints, query_issues_for_sprint_ordered
from jira.jira_response_parser import parse_query_specific_ticket, parse_active_sprint_id_response, parse_query_tickets_for_sprint_response
from models.sprint import Sprint
def get_specific_ticket(ticketName):
apiResponse = query_specific_issue(ticketName)
ticket = parse_query_specific_ticket(apiResponse)
return ticket
def get_active_sprint_id():
apiResponse = query_active_sprints()
sprintId = parse_active_sprint_id_response(apiResponse)
return sprintId
def get_tickets_for_sprint(sprintNumber):
    jira_config = Jira_Config()
if jira_config.jira_sort_property_id:
apiResponse = query_issues_for_sprint_ordered(sprintNumber,jira_config.jira_sort_property_id)
else:
apiResponse = query_issues_for_sprint(sprintNumber)
tickets = parse_query_tickets_for_sprint_response(apiResponse)
return tickets
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import json
from google.cloud import pubsub_v1
publisher_client = pubsub_v1.PublisherClient()
project_id = "<PROJECT_ID>"
topic_name = "<PUBSUB_TOPIC_NAME>"
topic_path = publisher_client.topic_path(project_id, topic_name)
futures = dict()
with open("config.json", "r") as f:
data = f.read()
config = json.loads(data)
# Python 3+ version of https://github.com/slackapi/python-slack-events-api/blob/master/slackeventsapi/server.py
def verify_signature(request):
""" Takes a Slack request and determines
if the request and its credentials are valid.
Args:
request (flask.Request): A Slack request
"""
timestamp = request.headers.get("X-Slack-Request-Timestamp", "")
signature = request.headers.get("X-Slack-Signature", "")
req = str.encode("v0:{}:".format(timestamp)) + request.get_data()
request_digest = hmac.new(
str.encode(config["SLACK_SECRET"]), req, hashlib.sha256
).hexdigest()
request_hash = "v0={}".format(request_digest)
if not hmac.compare_digest(request_hash, signature):
raise ValueError("Invalid request/credentials.")
def doge_queue(request):
"""HTTP Cloud Function. Takes a Slack request and passes it to
a second Cloud Function for processing via Pub/Sub.
Args:
request (flask.Request): A Slack request
Returns:
A response to the slack channel
"""
if request.method != "POST":
return "Only POST requests are accepted", 405
verify_signature(request)
data = json.dumps(request.form)
futures.update({data: None})
# When you publish a message, the client returns a future.
future = publisher_client.publish(
topic_path, data=data.encode("utf-8") # data must be a bytestring.
)
"""
Check if future.result() resolved with the ID of the message.
This indicates the message was successful.
"""
try:
print(future.result())
except Exception as e:
print("Error publishing: " + str(e))
return "Working on it! 🐕"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0005_move_fields_from_news'),
]
operations = [
migrations.AlterField(
model_name='advert',
name='news_ptr',
field=models.AutoField(auto_created=True, default=1, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.RenameField(
model_name='advert',
old_name='news_ptr',
new_name="id"
),
]
|
# ======================================================
# Checkpoint 2 : code supplied to students at start.
#
# Exact form of Charge Distribution
import math
import matplotlib.pyplot as pl
import numpy as np
# Plot the solutions to an ODE - one found by the RK4 method and one by Euler
def plotRK4Euler(xvaleuler, yvaleuler, xvalRK4, yvalRK4, eulercolor, RK4color, plottitle, filename):
# Plot euler and RK4 points on canvas
pl.plot(xvaleuler, yvaleuler, color=eulercolor, label='Euler')
pl.plot(xvalRK4, yvalRK4, color=RK4color, label='RK4')
pl.legend()
pl.title(plottitle)
pl.savefig(filename)
pl.clf() # Clear canvas
# Plot the difference between the RK4 and euler solutions
def plotDifferenceRK4Euler(xval, yvaleuler, yvalRK4, linecolor, marker, plottitle, filename):
# Plot difference between euler and RK4 on canvas
    # The lengths of the input arrays are assumed to be the same (at the discretion of the user)
pl.plot(xval, yvalRK4 - yvaleuler, color=linecolor, marker=marker)
pl.title(plottitle)
pl.savefig(filename)
pl.clf() # Clear canvas
#---------------------------------------
# Charge distribution at the PN junction
class ChargeDistribution:
#..............................................
# Methods for the user of this class
# To evaluate the y-value of the charge for an input x-value
def evaluate(self, x):
if( x < self.x1): return 0
if( x < self.x2): return self._shape( self.x1, self.x2, x)
if( x < self.x3): return -self._shape( self.x3, self.x2, x)
return 0.
# To plot the function on the screen
def show(self, title='', disp=True ):
xvalues, yvalues = self._get()
pl.plot( xvalues, yvalues )
pl.title( title )
if(disp):pl.show()
# Solve the charge distribution for the electric field over the range [x0, x1]
# using the initial conditions E(x0)=y0
def getelectricfield(self, method, x0, y0, x1, numpoints):
xvalues = np.linspace(x0, x1, numpoints) # Define equally spaced x values over the range in an array
yvalues = np.empty(numpoints) # Define empty array for E values
yvalues[0] = y0
        xvaluespacing = float(x1 - x0) / (numpoints - 1)  # match the point spacing used by np.linspace
for i in range(1, numpoints):
if method == 'RK4':
k1 = self.evaluate(xvalues[i - 1]) # slope at beginning
k2 = self.evaluate(xvalues[i - 1] + xvaluespacing / 2) # slope at midpoint
k3 = k2 # another slope at midpoint
k4 = self.evaluate(xvalues[i - 1] + xvaluespacing) # slope at endpoint
yvalues[i] = yvalues[i - 1] + xvaluespacing * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.)
if method == 'euler':
yvalues[i] = yvalues[i - 1] + xvaluespacing * self.evaluate(xvalues[i - 1])
return xvalues, yvalues
# Solve for the voltage using the electric field over the range [x0, x1]
# using the initial conditions V(x0)=y0
def getvoltage(self, method, x0, y0, x1, Eyvalues):
numpoints = len(Eyvalues)
xvalues = np.linspace(x0, x1, numpoints)
yvalues = np.empty(numpoints)
yvalues[0] = y0
        xvaluespacing = float(x1 - x0) / (numpoints - 1)  # match the point spacing used by np.linspace
for i in range(1, numpoints):
# Choose Runge-Kutta 4 method
if method == 'RK4':
k1 = - Eyvalues[i - 1] # slope at beginning
k2 = - (Eyvalues[i - 1] + Eyvalues[i]) / 2 # slope at midpoint
k3 = k2 # another slope at midpoint
k4 = - Eyvalues[i] # slope at endpoint
yvalues[i] = yvalues[i - 1] + xvaluespacing * (k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.)
# Choose Euler method
if method == 'euler':
yvalues[i] = yvalues[i - 1] + xvaluespacing * - Eyvalues[i - 1]
return xvalues, yvalues
#...........................................
#constructor
def __init__(self):
self.x0 = -2.
self.x1 = -1.
self.x2 = 0.
self.x3 = 1
self.x4 = 2
self.k = math.pi/(self.x3-self.x1)
# pseudo internal methods
def _shape(self, x0, x1, x):
z = (x-x0)/(x1-x0)
return (z**2)* (math.exp(1-z)-1.) / 0.18
def _get( self, start=-2, end=2., n=1000 ):
xvalues= []
yvalues = []
dx = (end-start)/n
for i in range(n):
xvalues.append(start+i*dx)
yvalues.append(self.evaluate(start+i*dx))
return xvalues, yvalues
# Testing
if __name__ == '__main__':
# Define charge distribution
chargeDistribution = ChargeDistribution()
# Compute electric field and voltage using both methods
numpoints = 50 # Number of points to solve for the ODE
# Initial conditions for voltage
Vx0 = -2
Vy0 = 0
# Initial conditions for electric field
Ex0 = -2
Ey0 = 0
# Final x values for electric field and voltage
Exf = 2
Vxf = 2
ExvalRK4, EyvalRK4 = chargeDistribution.getelectricfield('RK4', Ex0, Ey0, Exf, numpoints)
Exvaleuler, Eyvaleuler = chargeDistribution.getelectricfield('euler', Ex0, Ey0, Exf, numpoints)
VxvalRK4, VyvalRK4 = chargeDistribution.getvoltage('RK4', Vx0, Vy0, Vxf, EyvalRK4)
Vxvaleuler, Vyvaleuler = chargeDistribution.getvoltage('euler', Vx0, Vy0, Vxf, Eyvaleuler)
# Save plot of electric field found using RK4 and euler methods
plotRK4Euler(Exvaleuler, Eyvaleuler, ExvalRK4, EyvalRK4, 'red', 'blue',
'Electric field against x', 'electricfield.png')
# Save plot of voltage found using RK4 and euler methods
plotRK4Euler(Vxvaleuler, Vyvaleuler, VxvalRK4, VyvalRK4, 'red', 'blue',
'Voltage against x', 'voltage.png')
# Save plot of difference between electric field for RK4 and euler methods
plotDifferenceRK4Euler(Exvaleuler, Eyvaleuler, EyvalRK4, 'red', '.',
'Difference between Electric field solutions using RK4/Euler', 'electricfieldcomparison.png')
# Save plot of difference between voltage for RK4 and euler methods
plotDifferenceRK4Euler(Vxvaleuler, Vyvaleuler, VyvalRK4, 'red', '.',
'Difference between Voltage solutions using RK4/Euler', 'voltagecomparison.png')
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy
from torch.autograd import Variable
import matplotlib.pyplot as plt
from s2s.optim import *
from model import make_model
def train_epoch(train_iter, model, criterion, opt, transpose=False):
model.train()
for i, batch in enumerate(train_iter):
src, trg, src_mask, trg_mask = \
batch.src, batch.trg, batch.src_mask, batch.trg_mask
out = model.forward(src, trg[:, :-1], src_mask, trg_mask[:, :-1, :-1])
loss = loss_backprop(model.generator, criterion, out, trg[:, 1:], batch.ntokens)
        opt.step()
        opt.optimizer.zero_grad()
        if i % 10 == 1:
            print(i, loss, opt._rate)
def valid_epoch(valid_iter, model, criterion, transpose=False):
    model.eval()
total = 0
for batch in valid_iter:
src, trg, src_mask, trg_mask = batch.src, batch.trg, batch.src_mask, batch.trg_mask
out = model.forward(src, trg[:, :-1], src_mask, trg_mask[:, :-1, :-1])
loss = loss_backprop(model.generator, criterion, out, trg[:, 1:], batch.ntokens)
class Batch:
def __init__(self, src, trg, src_mask, trg_mask, ntokens):
self.src = src
self.trg = trg
self.src_mask = src_mask
self.trg_mask = trg_mask
self.ntokens = ntokens
def data_gen(V, batch, nbatches):
for i in range(nbatches):
data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))
src = Variable(data, requires_grad=False)
tgt = Variable(data, requires_grad=False)
src_mask, tgt_mask = make_std_mask(src, tgt, 0)
yield Batch(src, tgt, src_mask, tgt_mask, (tgt[1:] != 0).data.sum())
V = 11
criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)
model = make_model(V, V, N=2)
print(model)
model_opt = get_std_opt(model)
for epoch in range(2):
train_epoch(data_gen(V, 30, 20), model, criterion, model_opt)
|
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import datetime as dt
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {"executable_path": "assets/chromedriver"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
    # dictionary to hold the scraped Mars data
Mars_data = {}
# find the latest news title and para
browser = init_browser()
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
browser.visit(url)
html = browser.html
soup = bs(html, "lxml")
Mars_data["news_title"] = soup.find("div", class_="content_title").a.text
Mars_data["news_p"] = soup.find("div", class_="article_teaser_body").text
# open initial website find full img url
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
html = browser.html
soup = bs(html,"lxml")
#get the datalink from this webpage
md_size = soup.find("footer").a["data-link"] #medium sized
full_url = "https://www.jpl.nasa.gov" + md_size
#go to the webpage where the fullsized image can be found
browser.visit(full_url)
html = browser.html
soup = bs(html,"lxml")
#Get the url for the full-sized image
lg_size = soup.find("figure").a.img["src"]
Mars_data['featured_img'] = f"https://www.jpl.nasa.gov{lg_size}"
# mars weather
url = "https://twitter.com/marswxreport?lang=en"
browser.visit(url)
html = browser.html
soup = bs(html,"lxml")
Mars_data['weather'] = soup.find("p", class_ = "tweet-text").text
# Mars facts
url = "https://space-facts.com/mars/"
df = pd.read_html(url)
df = df[0]
df.columns = ["description", "value"]
df.set_index("description", inplace=True)
Mars_data['table'] = df.to_html(classes='table table-striped')
# mars hemispheres
#open main web page
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
html = browser.html
soup = bs(html,"lxml")
    imgs = soup.find_all("div", class_ = "item") # find all divisions containing the different hemispheres
    #create an empty list for the hemisphere records
hemispheres = []
#loop through each hemisophere and get title and image link
for img in imgs:
hemisphere = {}
title = img.find('div', class_="description").a.h3.text
browser.click_link_by_partial_text(title)
html = browser.html
soup = bs(html,"lxml")
img_link = soup.ul.li.a["href"]
hemisphere["title"] = title.split(" E")[0]
hemisphere["img_url"] = img_link
hemispheres.append(hemisphere)
browser.back()
Mars_data['hemispheres'] = hemispheres
# close browser
browser.quit()
# when this page is modified
Mars_data["last_updated"] = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return Mars_data
|
__source__ = 'https://leetcode.com/problems/wiggle-subsequence/#/description'
# https://github.com/kamyu104/LeetCode/blob/master/Python/wiggle-subsequence.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 376. Wiggle Subsequence
#
# A sequence of numbers is called a wiggle sequence
# if the differences between successive numbers strictly
# alternate between positive and negative.
# The first difference (if one exists) may be either positive
# or negative. A sequence with fewer than two elements
# is trivially a wiggle sequence.
#
# For example, [1,7,4,9,2,5] is a wiggle sequence because
# the differences (6,-3,5,-7,3) are alternately positive
# and negative. In contrast, [1,4,7,2,5] and [1,7,4,5,5] are
# not wiggle sequences, the first because its first two differences
# are positive and the second because its last difference is zero.
#
# Given a sequence of integers, return the length of
# the longest subsequence that is a wiggle sequence.
# A subsequence is obtained by deleting some number of elements
# (eventually, also zero) from the original sequence, leaving
# the remaining elements in their original order.
#
# Examples:
# Input: [1,7,4,9,2,5]
# Output: 6
# The entire sequence is a wiggle sequence.
#
# Input: [1,17,5,10,13,15,10,5,16,8]
# Output: 7
# There are several subsequences that achieve this length. One is [1,17,10,13,10,16,8].
#
# Input: [1,2,3,4,5,6,7,8,9]
# Output: 2
#
# Follow up:
# Can you do it in O(n) time?
#
# Related Topics
# Dynamic Programming Greedy
import unittest
# Explanation / Proof:
#
# Imagine the given array contains [..., 10, 10, 10, 10, ...].
# Obviously we can't use more than one of those tens,
# as that wouldn't be wiggly. So right away we can ignore all consecutive duplicates.
#
# Imagine the given array contains [..., 10, 7, 11, 13, 17, 19, 23, 20, ...].
# So increasing from 7 to 23. What can we do with that? Well we can't use more than two of those increasing numbers,
# as that wouldn't be wiggly. And if we do use two,
# we'd better use the 7 and the 23, as that offers the best extensibility
# (for example, the 19 wouldn't allow to next pick the 20 for the wiggly subsequence).
# And if we do use only one, it still should be either the 7 or the 23, as the 7 is the best wiggle-low
# and the 23 is the best wiggle-high of them. So whether we actually use the 7 and the 23 or not,
# we definitely can and should remove the 11, 13, 17, and 19. So then we have [..., 10, 7, 23, 20, ...].
# Now, notice that the 7 is a local minimum (both the 10 and the 23 are larger) and the 23 is a local maximum.
# And if we do this with all increasing or decreasing streaks, i.e., keep only their first and last number,
# then all the numbers we have left are local extrema, either smaller than both neighbors or larger than both neighbors.
# Which means that at that point, we're already fully wiggly.
# And we only removed as many numbers as we have to. So it's a longest possible wiggly subsequence.
#
# My solution first computes differences of neighbors and throws out zeros
# (which does get rid of those useless consecutive duplicates).
# And then it just counts the local extrema (by checking two consecutive differences).
#
# I use nan for some convenience, I'll let you figure that part out :-)
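#
# A worked trace of that idea (added for illustration; not part of the original
# write-up), using the first example nums = [1, 7, 4, 9, 2, 5]:
#   zip([nan] + nums, nums + [nan]) pairs up (nan,1),(1,7),(7,4),(4,9),(9,2),(2,5),(5,nan)
#   a - b for each pair gives [nan, -6, 3, -5, 7, -3, nan]; zero differences
#   (i.e. consecutive duplicates) are dropped by the "if a-b" filter, while the
#   nan sentinels survive because nan is truthy.
#   The consecutive products d*e are [nan, -18, -15, -35, -21, nan]; none of them
#   satisfies d*e >= 0, so "not d*e >= 0" is True six times and the answer is 6.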
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return len(nums)
length, up = 1, None
for i in xrange(1, len(nums)):
if nums[i - 1] < nums[i] and (up is None or up is False):
length += 1
up = True
elif nums[i - 1] > nums[i] and (up is None or up is True):
length += 1
up = False
return length
#20ms 99.55%
def wiggleMaxLength2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nan = float('nan')
diffs = [a-b for a, b in zip([nan] + nums, nums + [nan]) if a-b]
return sum(not d*e >= 0 for d, e in zip(diffs, diffs[1:]))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/wiggle-subsequence/solution/
In Wiggle Subsequence, note that the subsequence we need alternates between higher and lower numbers.
Eg: 2, 5, 3, 8, 6, 9
In the above example, the numbers alternate small, big, small, big, small, big (in the shape of hills).
Now for explanation, we take example series:
2,1,4,5,6,3,3,4,8,4
First we check whether the series starts as (big, small) or (small, big). Here 2,1 is (big, small),
so we start the loop needing a small number next (the 2 is already in the series).
Step 1: We need a small number. Since 1 < 2, the series becomes
2,1
Step 2: Now we need big number that is greater than 1. As 4>1 so series will be
2,1,4
Step 3: Now we need small number. But 5>4 so 4 will be replaced by 5. So the series will become
2,1,5
Step 4: We need small number. But 6>5. Series will be
2,1,6
Step 5: Require small number. 3<6. Series will be
2,1,6,3
Step 6: Require big number. 3=3. No change in series
2,1,6,3
Step 7: Require big number. 4>3. Series will become
2,1,6,3,4
Step 8: Require small number. 8>4. 8 will replace 4 and series will become
2,1,6,3,8
Step 9: Require small number. 4<8. So final series will be
2,1,6,3,8,4
Answer is 6.
In the code, to keep space at O(1) we modify the same 'nums' array in place to store the (small, big, small)
hill-shaped values. So the code not only calculates the length of the sequence, but if the interviewer
also asks for the wiggle series itself, we can return it too. The LeetCode Online Judge has a
test case where the series starts with a run of equal numbers, so the code below skips those
leading duplicates to handle that case as well.
#100% 0ms
class Solution {
public int wiggleMaxLength(int[] nums) {
if (nums.length == 0 || nums.length == 1) {
return nums.length;
}
int k = 0;
while (k < nums.length - 1 && nums[k] == nums[k + 1]) { //Skips all the same numbers from series beginning eg 5, 5, 5, 1
k++;
}
if (k == nums.length - 1) {
return 1;
}
        int result = 2; // Tracks the length of the wiggle series built in place in nums
        boolean smallReq = nums[k] < nums[k + 1]; // To check the series' starting pattern
for (int i = k + 1; i < nums.length - 1; i++) {
if (smallReq && nums[i + 1] < nums[i]) {
nums[result] = nums[i + 1];
result++;
smallReq = !smallReq; //Toggle the requirement from small to big number
} else {
if (!smallReq && nums[i + 1] > nums[i]) {
nums[result] = nums[i + 1];
result++;
smallReq = !smallReq; //Toggle the requirement from big to small number
}
}
}
return result;
}
}
2. DP:
For every position in the array, there are only three possible statuses for it.
up position, it means nums[i] > nums[i-1]
down position, it means nums[i] < nums[i-1]
equals to position, nums[i] == nums[i-1]
So we can use two arrays up[] and down[] to record the max wiggle sequence length so far at index i.
If nums[i] > nums[i-1], that means it wiggles up.
The element before it must be a down position,
so up[i] = down[i-1] + 1; down[i] keeps the same value as before.
If nums[i] < nums[i-1], that means it wiggles down. The element before it must be an up position,
so down[i] = up[i-1] + 1; up[i] keeps the same value as before.
If nums[i] == nums[i-1], that means it does not change anything, because it didn't wiggle at all,
so both down[i] and up[i] keep the same values.
In fact, we can reduce the space complexity to O(1), but the current way is easier to understand.
#100% 0ms
public class Solution {
public int wiggleMaxLength(int[] nums) {
if( nums.length == 0 ) return 0;
int[] up = new int[nums.length];
int[] down = new int[nums.length];
up[0] = 1;
down[0] = 1;
for(int i = 1 ; i < nums.length; i++){
if( nums[i] > nums[i-1] ){
up[i] = down[i-1]+1;
down[i] = down[i-1];
}else if( nums[i] < nums[i-1]){
down[i] = up[i-1]+1;
up[i] = up[i-1];
}else{
down[i] = down[i-1];
up[i] = up[i-1];
}
}
return Math.max(down[nums.length-1],up[nums.length-1]);
}
}
to space(1)
#100% 0ms
public class Solution {
public int wiggleMaxLength(int[] nums) {
if (nums.length < 2)
return nums.length;
int down = 1, up = 1;
for (int i = 1; i < nums.length; i++) {
if (nums[i] > nums[i - 1])
up = down + 1;
else if (nums[i] < nums[i - 1])
down = up + 1;
}
return Math.max(down, up);
}
}
3. Greedy: finding the number of alternating max. and min. peaks in the array.
#100% 0ms
class Solution {
public int wiggleMaxLength(int[] nums) {
if (nums.length < 2)
return nums.length;
int prevdiff = nums[1] - nums[0];
int count = prevdiff != 0 ? 2 : 1;
for (int i = 2; i < nums.length; i++) {
int diff = nums[i] - nums[i - 1];
if ((diff > 0 && prevdiff <= 0) || (diff < 0 && prevdiff >= 0)) {
count++;
prevdiff = diff;
}
}
return count;
}
}
'''
|
from npnlp import minimize
import numpy as np
tol = 1e-6
def test_sqp1():
def J(x):
return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])
x0 = np.array([0.5, 3.0])
nil = np.array([])
out = minimize(J, x0, Aeq=np.array([[1,0]]), beq=np.array([1]), method='SQP')
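    # Sanity check (added note, not from the original test): with the equality
    # constraint x[0] = 1, J reduces to 1 + x[1]**2 - x[1], which is minimized at
    # x[1] = 0.5. The full gradient there is (4*1 - 2*0.5, 2*0.5 - 1) = (3, 0),
    # and stationarity gives a multiplier of -3 for the linear equality constraint
    # (under npnlp's sign convention, which is an assumption here).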
assert abs(out['x'][0] - 1) < tol
assert abs(out['x'][1] - 0.5) < tol
assert abs(out['grad'][0] - 3) < tol
assert abs(out['grad'][1] - 0) < tol
assert abs(out['kkt'].equality_linear[0] + 3) < tol
def test_sqp2():
def J(x):
return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])
x0 = np.array([0.5, 3.0])
nil = np.array([])
out = minimize(J, x0, A=np.array([[1,0]]), b=np.array([-1]), method='SQP')
assert abs(out['x'][0] + 1) < tol
assert abs(out['x'][1] - 0.5) < tol
assert abs(out['grad'][0] + 3) < tol
assert abs(out['grad'][1] - 0) < tol
assert abs(out['kkt'].inequality_linear[0] - 3) < tol
def test_sqp3():
def J(x):
return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])
def eq_con(x, kkt):
return np.array([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])
x0 = np.array([0.5, 3.0])
nil = np.array([])
out = minimize(J, x0, nonlconeq=eq_con, method='SQP')
assert abs(out['x'][0] - 1) < tol
assert abs(out['x'][1] - 1.5) < tol
assert abs(out['grad'][0] - 1) < tol
assert abs(out['grad'][1] - 2) < tol
assert abs(out['kkt'].equality_nonlinear[0] - 2) < tol
assert abs(out['kkt'].equality_nonlinear[1] - 0.5) < tol
def test_sqp4():
def J(x):
return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])
def eq_con(x, l):
return np.array([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])
x0 = np.array([0.5, 3.0])
nil = np.array([])
out = minimize(J, x0, nonlconineq=eq_con, method='SQP')
assert abs(out['x'][0] - 1) < tol
assert abs(out['x'][1] - 1.5) < tol
assert abs(out['grad'][0] - 1) < tol
assert abs(out['grad'][1] - 2) < tol
assert abs(out['kkt'].inequality_nonlinear[0] - 2) < tol
assert abs(out['kkt'].inequality_nonlinear[1] - 0.5) < tol
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'depot_tools/bot_update',
'chromium',
'commit_position',
'file',
'depot_tools/gclient',
'gsutil',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
BUCKET_NAME = 'chrome-codesearch'
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com'
# Lists the additional repositories that should be checked out to be included
# in the source archive that is indexed by Codesearch.
ADDITIONAL_REPOS = freeze({
'infra': '%s/infra/infra' % CHROMIUM_GIT_URL,
'tools/chrome-devtools-frontend':\
'%s/chromium/tools/chrome-devtools-frontend' % CHROMIUM_GIT_URL,
'tools/chromium-jobqueue':\
'%s/chromium/tools/chromium-jobqueue' % CHROMIUM_GIT_URL,
'tools/chromium-shortener':\
'%s/chromium/tools/chromium-shortener' % CHROMIUM_GIT_URL,
'tools/command_wrapper/bin':\
'%s/chromium/tools/command_wrapper/bin' % CHROMIUM_GIT_URL,
'tools/depot_tools': '%s/chromium/tools/depot_tools' % CHROMIUM_GIT_URL,
'tools/deps2git': '%s/chromium/tools/deps2git' % CHROMIUM_GIT_URL,
'tools/gsd_generate_index':\
'%s/chromium/tools/gsd_generate_index' % CHROMIUM_GIT_URL,
'tools/perf': '%s/chromium/tools/perf' % CHROMIUM_GIT_URL,
})
LINUX_GN_ARGS = [
'is_clang=true',
'is_component_build=true',
'is_debug=true',
'goma_dir="/b/build/goma"',
'symbol_level=1',
'target_cpu="x64"',
'use_goma=true',
]
CHROMEOS_GN_ARGS = LINUX_GN_ARGS + [
'target_os="chromeos"',
'use_ozone=true',
]
SPEC = freeze({
# The builders have the following parameters:
# - compile_targets: the compile targets.
# - environment: The environment of the bot (prod / staging).
# - package_filename: The prefix of the name of the source archive.
# - platform: The platform for which the code is compiled.
'builders': {
'Chromium Linux Codesearch': {
'compile_targets': [
'All',
],
'environment': 'prod',
'package_filename': 'chromium-src',
'platform': 'linux',
},
'ChromiumOS Codesearch': {
'compile_targets': [
'All',
],
'environment': 'prod',
'package_filename': 'chromiumos-src',
'platform': 'chromeos',
},
'Chromium Linux Codesearch Builder': {
'compile_targets': [
'All',
],
'environment': 'staging',
'package_filename': 'chromium-src',
'platform': 'linux',
},
'ChromiumOS Codesearch Builder': {
'compile_targets': [
'All',
],
'environment': 'staging',
'package_filename': 'chromiumos-src',
'platform': 'chromeos',
},
},
})
def GenerateCompilationDatabase(api, debug_path, targets, platform):
# TODO(akuegel): If we ever build on Windows or Mac, this needs to be
# adjusted.
gn_path = api.path['checkout'].join('buildtools', 'linux64', 'gn')
args = LINUX_GN_ARGS if platform == 'linux' else CHROMEOS_GN_ARGS
command = [gn_path, 'gen', debug_path, '--args=%s' % ' '.join(args)]
api.step('generate build files for %s' % platform, command,
cwd=api.path['checkout'])
command = ['ninja', '-C', debug_path] + list(targets)
# Add the parameters for creating the compilation database.
command += ['-t', 'compdb', 'cc', 'cxx', 'objc', 'objcxx']
return api.step('generate compilation database for %s' % platform,
command,
stdout=api.raw_io.output())
def RunSteps(api):
buildername = api.properties.get('buildername')
bot_config = SPEC.get('builders', {}).get(buildername)
platform = bot_config.get('platform', 'linux')
# Checkout the repositories that are either directly needed or should be
# included in the source archive.
gclient_config = api.gclient.make_config('chromium', GIT_MODE=True)
for name, url in ADDITIONAL_REPOS.iteritems():
solution = gclient_config.solutions.add()
solution.name = name
solution.url = url
api.gclient.c = gclient_config
update_step = api.bot_update.ensure_checkout()
api.chromium.set_build_properties(update_step.json.output['properties'])
# Remove the llvm-build directory, so that gclient runhooks will download
# the pre-built clang binary and not use the locally compiled binary from
# the 'compile translation_unit clang tool' step.
api.file.rmtree('llvm-build',
api.path['checkout'].join('third_party', 'llvm-build'))
debug_path = api.path['checkout'].join('out', 'Debug')
targets = bot_config.get('compile_targets', [])
api.chromium.set_config('codesearch', BUILD_CONFIG='Debug')
api.chromium.runhooks()
result = GenerateCompilationDatabase(api, debug_path, targets, platform)
try:
api.chromium.compile(targets)
except api.step.StepFailure as f: # pragma: no cover
# Even if compilation fails, the Grok indexer may still be able to extract
# (almost) all cross references. And the downside of failing on compile
# error is that Codesearch gets stale.
pass
environment = bot_config.get('environment', 'prod')
if environment == 'staging':
return
# Copy the created output to the correct directory. When running the clang
# tool, it is assumed by the scripts that the compilation database is in the
# out/Debug directory, and named 'compile_commands.json'.
api.step('copy compilation database',
['cp', api.raw_io.input(data=result.stdout),
debug_path.join('compile_commands.json')])
if platform == 'chromeos':
result = GenerateCompilationDatabase(api, debug_path, targets, 'linux')
api.python('Filter out duplicate compilation units',
api.path['build'].join('scripts', 'slave', 'chromium',
'filter_compilations.py'),
['--compdb-input', debug_path.join('compile_commands.json'),
'--compdb-filter', api.raw_io.input(data=result.stdout),
'--compdb-output', debug_path.join('compile_commands.json')])
# Compile the clang tool
script_path = api.path.sep.join(['tools', 'clang', 'scripts', 'update.py'])
api.step('compile translation_unit clang tool',
[script_path, '--force-local-build', '--without-android',
'--tools', 'translation_unit'],
cwd=api.path['checkout'])
# Run the clang tool
args = [api.path['checkout'].join('third_party', 'llvm-build',
'Release+Asserts', 'bin',
'translation_unit'),
debug_path, '--all']
try:
api.python(
'run translation_unit clang tool',
api.path['checkout'].join('tools', 'clang', 'scripts', 'run_tool.py'),
args)
except api.step.StepFailure as f:
# For some files, the clang tool produces errors. This is a known issue,
# but since it only affects very few files (currently 9), we ignore these
# errors for now. At least this means we can already have cross references
# support for the files where it works.
api.step.active_result.presentation.step_text = f.reason_message()
api.step.active_result.presentation.status = api.step.WARNING
# Create the index pack
got_revision_cp = api.chromium.build_properties.get('got_revision_cp')
commit_position = api.commit_position.parse_revision(got_revision_cp)
index_pack_name = 'index_pack_%s.zip' % platform
index_pack_name_with_revision = 'index_pack_%s_%s.zip' % (
platform, commit_position)
api.python('create index pack',
api.path['build'].join('scripts', 'slave', 'chromium',
'package_index.py'),
['--path-to-compdb', debug_path.join('compile_commands.json'),
'--path-to-archive-output', debug_path.join(index_pack_name)])
# Upload the index pack
api.gsutil.upload(
name='upload index pack',
source=debug_path.join(index_pack_name),
bucket=BUCKET_NAME,
dest='%s/%s' % (environment, index_pack_name_with_revision)
)
# Package the source code.
tarball_name = 'chromium_src_%s.tar.bz2' % platform
tarball_name_with_revision = 'chromium_src_%s_%s.tar.bz2' % (
platform,commit_position)
api.python('archive source',
api.path['build'].join('scripts','slave',
'archive_source_codesearch.py'),
['src', 'build', 'infra', 'tools', '-f',
tarball_name])
# Upload the source code.
api.gsutil.upload(
name='upload source tarball',
source=api.path['slave_build'].join(tarball_name),
bucket=BUCKET_NAME,
dest='%s/%s' % (environment, tarball_name_with_revision)
)
def _sanitize_nonalpha(text):
return ''.join(c if c.isalnum() else '_' for c in text)
def GenTests(api):
for buildername, config in SPEC['builders'].iteritems():
platform = config.get('platform')
test = api.test('full_%s' % (_sanitize_nonalpha(buildername)))
test += api.step_data('generate compilation database for %s' % platform,
stdout=api.raw_io.output('some compilation data'))
if platform == 'chromeos' and config.get('environment') == 'prod':
test += api.step_data('generate compilation database for linux',
stdout=api.raw_io.output('some compilation data'))
test += api.properties.generic(buildername=buildername,
mastername='chromium.infra.cron')
yield test
yield (
api.test(
'full_%s_fail' % _sanitize_nonalpha('ChromiumOS Codesearch')) +
api.step_data('generate compilation database for chromeos',
stdout=api.raw_io.output('some compilation data')) +
api.step_data('generate compilation database for linux',
stdout=api.raw_io.output('some compilation data')) +
api.step_data('run translation_unit clang tool', retcode=2) +
api.properties.generic(buildername='ChromiumOS Codesearch',
mastername='chromium.infra.cron')
)
|
#!/usr/bin/env python
# coding=utf-8
"""A simple example demonstrating how to remove unused commands.
Commands can be removed from help menu and tab completion by appending their command name to the hidden_commands list.
These commands will still exist and can be executed and help can be retrieved for them by
name, they just won't clutter the help menu.
Commands can also be removed entirely by using Python's "del".
"""
import cmd2
class RemoveUnusedBuiltinCommands(cmd2.Cmd):
""" Example cmd2 application where we remove some unused built-in commands."""
def __init__(self):
super().__init__()
# To hide commands from displaying in the help menu, add them to the hidden_commands list
self.hidden_commands.append('py')
# To remove built-in commands entirely, delete their "do_*" function from the cmd2.Cmd class
del cmd2.Cmd.do_edit
if __name__ == '__main__':
app = RemoveUnusedBuiltinCommands()
app.cmdloop()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_data_pipes import ETL, EngineRegistry, Signal
from celery import Celery
from importlib import import_module
from config import config
import os
BASE = os.path.dirname(__file__)
db = SQLAlchemy()
etl = ETL()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init_app(app)
app.signal = Signal()
app.engine = EngineRegistry(app=app, db=db)
app.celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URI'])
etl.init_app(app, db)
return app
def import_models(app):
with app.app_context():
for module_name in app.config['MODULES']:
try:
import_module(f'{BASE}.{module_name}.models', package=__name__)
except (AttributeError, ModuleNotFoundError):
continue
app.signal.etl_tables_imported.send(app)
def import_tasks(app):
with app.app_context():
for module_name in app.config['MODULES']:
try:
import_module(f'{BASE}.{module_name}.tasks', package=__name__)
app.logger.info(f'Task module imported: {module_name}')
except ModuleNotFoundError:
app.logger.warn(f"Task module not imported: {BASE}.{module_name}.tasks not found.")
continue
app = create_app(os.getenv('APPENV', 'default'))
import_models(app)
if __name__ == '__main__':
app.run(port=8080, threaded=True)
|
"""Operators of the N-Key-Panel"""
#
# The operators of the N-Key-Panel.
#
# Copyright (c) 2021 Keith Pinson
import bpy
from bpy.types import Operator
from ..utils.collection_utils import collection_add
from ..utils.object_utils import\
object_add, object_get_or_add_empty, object_parent_all, object_make_active
from ..citySketch import sketch_object
class CVB_OT_NewSketchButton(Operator):
# pylint: disable=invalid-name
"""New Sketch Button"""
bl_idname = 'cvb.new_sketch_button'
bl_label = 'New'
bl_options = {"REGISTER", "UNDO"}
bl_description = """Add a new City Map Sketch"""
def execute(self, context):
"""Operator to add a New Sketch behaves like a button when only execute() is defined"""
#
# Example With Empties
#
        # The one master empty is used to control the transforms of the sketch
        # map:
# CVB [collection]
# City1_g1x1.001 [collection]
# City1_g1x1.001 Transform [empty of cube shape] Hide in viewport (parent)
# Sketch ~ City1_g1x1.001 [collection]
# Sketch Plane ~ City1_g1x1.001 [plane mesh] (child)
cvb = context.scene.CVB
size = (cvb.sketch_xy_linked_prop, cvb.sketch_xy_linked_prop) if \
cvb.using_tile_id_prop else (cvb.sketch_x_prop, cvb.sketch_y_prop)
new_sketchname = cvb.city_props.sketch_name_with_next_variant(cvb)
if not new_sketchname:
return {"CANCEL"}
# Collection
sketch_path = "/CVB/{0}/Sketch ~ {0}".format(new_sketchname)
collection_add(sketch_path)
# Sketch Object
sketch_name = "{0} Sketch".format(new_sketchname)
object_add(sketch_path, sketch_name, sketch_object.CitySketch(sketch_name, size[0], size[1]).obj)
# Transform Empty
cvb_path = "/CVB/{0}".format(new_sketchname)
empty_name = "{0} Transform".format(new_sketchname)
empty = object_get_or_add_empty(cvb_path, empty_name, radius=0.12, display_type='CUBE')
if empty:
object_parent_all(empty, sketch_path)
# Active Object
sketch_path_and_name = sketch_path + "/" + sketch_name
object_make_active(sketch_path_and_name)
# Refresh the list after we've done everything
cvb.city_props.refresh_sketch_list(cvb)
cvb.city_props.update_city_name_prop(context)
# And Lastly, Show Minimized
cvb.sketch_minimized_prop = True
cvb.mini_sketch_add_or_toggle(True)
return {"FINISHED"}
|
import logging
from logging import NullHandler
from .operation import Operation as _Operation
from .constant import Constant as _Constant
from .addition import Addition as _Addition
from .subtraction import Subtraction as _Subtraction
from .multiplication import Multiplication as _Multiplication
from .division import Division as _Division
__all__ = ['catalogue']
logging.getLogger(__name__).addHandler(NullHandler())
def catalogue():
"""
Loads operations and constructs a mapping out of them
:return: Mapping of operation tag and Operation class
"""
catalog = {sc.TAG: sc for sc in _Operation.__subclasses__()}
return catalog
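# Example usage (a minimal sketch added for illustration; the '+' tag is an
# assumption -- the actual TAG values are defined by the Operation subclasses
# imported above):
#
#   ops = catalogue()
#   addition_cls = ops.get('+')
#   if addition_cls is not None:
#       # construct / apply it according to the subclass's own interface
#       result = addition_cls()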
|
# Generated by Django 3.1.7 on 2021-05-14 12:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bank', '0008_auto_20210514_1337'),
]
operations = [
migrations.CreateModel(
name='RequestForOpeningDeposit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deposit_amount', models.FloatField(verbose_name='Deposit Amount')),
('deposit_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bank.depositstype')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RequestForOpeningCredit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('credit_amount', models.FloatField(verbose_name='Credit Amount')),
('credit_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bank.creditstype')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from datetime import datetime, timedelta
import logging, logging.config
from time import sleep, time
import HTMLParser
import praw
import re2 as re
import yaml
from requests.exceptions import HTTPError
from sqlalchemy.sql import and_
from sqlalchemy.orm.exc import NoResultFound
from models import cfg_file, path_to_cfg, session
from models import Log, StandardCondition, Subreddit
# global reddit session
r = None
class Condition(object):
_defaults = {'reports': None,
'author_is_submitter': None,
'is_reply': None,
'ignore_blockquotes': False,
'moderators_exempt': True,
'body_min_length': None,
'body_max_length': None,
'priority': 0,
'action': None,
'comment': None,
'modmail': None,
'modmail_subject': 'AutoModerator notification',
'message': None,
'message_subject': 'AutoModerator notification',
'link_flair_text': '',
'link_flair_class': '',
'user_flair_text': '',
'user_flair_class': '',
'user_conditions': {},
'set_options': [],
'modifiers': [],
'overwrite_user_flair': False}
_match_targets = ['link_id', 'user', 'title', 'domain', 'url', 'body',
'media_user', 'media_title', 'media_description',
'media_author_url',
'author_flair_text', 'author_flair_css_class',
'link_title', 'link_url']
_match_modifiers = {'full-exact': u'^{0}$',
'full-text': ur'^\W*{0}\W*$',
'includes': u'{0}',
'includes-word': ur'(?:^|\W|\b){0}(?:$|\W|\b)',
'starts-with': u'^{0}',
'ends-with': u'{0}$'}
_modifier_defaults = {'link_id': 'full-exact',
'user': 'full-exact',
'domain': 'full-exact',
'url': 'includes',
'media_user': 'full-exact',
'media_author_url': 'includes',
'author_flair_text': 'full-exact',
'author_flair_css_class': 'full-exact',
'link_url': 'includes'}
_standard_cache = {}
_standard_rows = None
@classmethod
def update_standards(cls):
standards = session.query(StandardCondition).all()
if standards != cls._standard_rows:
cls._standard_cache = {cond.name.lower(): yaml.safe_load(cond.yaml)
for cond in standards}
cls._standard_rows = standards
return True
return False
@classmethod
def get_standard_condition(cls, name):
return cls._standard_cache.get(name.lower(), dict())
@property
def requests_required(self):
# all things that will require an additional request
reqs = sum(1 for i in
(self.action, self.user_conditions, self.comment,
self.modmail, self.message,
(self.user_flair_text or self.user_flair_class),
(self.link_flair_text or self.link_flair_class))
if i)
# one extra request for distinguishing a comment
if self.comment:
reqs += 1
if self.set_options:
reqs += len(set(self.set_options))
return reqs
def __init__(self, values):
values = lowercase_keys_recursively(values)
self.yaml = yaml.dump(values)
# anything not defined in the "values" dict will be defaulted
init = self._defaults.copy()
# inherit from standard condition if they specified one
if 'standard' in values:
init.update(Condition.get_standard_condition(values['standard']))
init.update(values)
# convert the dict to attributes
self.__dict__.update(init)
# set match target/pattern definitions
self.match_patterns = {}
self.match_success = {}
self.match_flags = {}
match_fields = set()
for key in [k for k in init
if self.trimmed_key(k) in self._match_targets or '+' in k]:
if isinstance(self.modifiers, dict):
modifiers = self.modifiers.get(key, [])
else:
modifiers = self.modifiers
self.match_patterns[key] = self.get_pattern(key, modifiers)
if 'inverse' in modifiers or key.startswith('~'):
self.match_success[key] = False
else:
self.match_success[key] = True
# default match flags
self.match_flags[key] = re.DOTALL|re.UNICODE
if 'case-sensitive' not in modifiers:
self.match_flags[key] |= re.IGNORECASE
for field in self.trimmed_key(key).split('+'):
match_fields.add(field)
# if type wasn't defined, set based on fields being matched against
if not getattr(self, 'type', None):
if (len(match_fields) > 0 and
all(f in ('title', 'domain', 'url',
'media_user', 'media_title', 'media_description',
'media_author_url')
for f in match_fields)):
self.type = 'submission'
else:
self.type = 'both'
if self.set_options and not isinstance(self.set_options, list):
self.set_options = self.set_options.split()
def trimmed_key(self, key):
subjects = key.lstrip('~')
subjects = re.sub(r'#.+$', '', subjects)
return subjects
def get_pattern(self, subject, modifiers):
# cast to lists, so we're not splitting a single string
if not isinstance(getattr(self, subject), list):
setattr(self, subject, [getattr(self, subject)])
if not isinstance(modifiers, list):
modifiers = list(modifiers.split(' '))
# cast all elements to strings in case of any numbers
values = [unicode(val) for val in getattr(self, subject)]
if 'regex' not in modifiers:
values = [re.escape(val) for val in values]
value_str = u'({0})'.format('|'.join(values))
# check if they defined a match modifier
for mod in self._match_modifiers:
if mod in modifiers:
match_mod = mod
break
else:
subject = self.trimmed_key(subject)
# handle subdomains for domain checks
if subject == 'domain':
value_str = ur'(?:.*?\.)?' + value_str
match_mod = self._modifier_defaults.get(subject, 'includes-word')
return self._match_modifiers[match_mod].format(value_str)
def check_item(self, item):
"""Checks an item against the condition.
Returns True if the condition is satisfied, False otherwise.
"""
html_parser = HTMLParser.HTMLParser()
# check number of reports if necessary
if self.reports and item.num_reports < self.reports:
return False
# check whether it's a reply or top-level comment if necessary
if self.is_reply is not None and self.is_reply != is_reply(item):
return False
# check whether the author is the submitter if necessary
if (self.author_is_submitter is not None and
isinstance(item, praw.objects.Comment)):
author_is_submitter = (item.author and
item.link_author != "[deleted]" and
item.author.name == item.link_author)
if self.author_is_submitter != author_is_submitter:
return False
# pull out the item's body and remove blockquotes if necessary
if isinstance(item, praw.objects.Submission):
body_string = item.selftext
else:
body_string = item.body
if self.ignore_blockquotes:
body_string = html_parser.unescape(body_string)
body_string = '\n'.join(line for line in body_string.splitlines()
if not line.startswith('>') and
len(line) > 0)
# check body length restrictions if necessary
if (self.body_min_length is not None or
self.body_max_length is not None):
# remove non-word chars on either end of the string
pattern = re.compile(r'^\W+', re.UNICODE)
body_text = pattern.sub('', body_string)
pattern = re.compile(r'\W+$', re.UNICODE)
body_text = pattern.sub('', body_text)
if (self.body_min_length is not None and
len(body_text) < self.body_min_length):
return False
if (self.body_max_length is not None and
len(body_text) > self.body_max_length):
return False
match = None
approve_shadowbanned = False
for subject in self.match_patterns:
sources = set(self.trimmed_key(subject).split('+'))
for source in sources:
approve_shadowbanned = False
if source == 'user' and item.author:
string = item.author.name
# allow approving shadowbanned if it's a username match
approve_shadowbanned = True
elif source == 'link_id':
# trim off the 't3_'
string = getattr(item, 'link_id', '')[3:]
elif source == 'body':
string = body_string
elif (source == 'url' and
getattr(item, 'is_self', False)):
# get rid of the url value for self-posts
string = ''
elif (source.startswith('media_') and
getattr(item, 'media', None)):
try:
if source == 'media_user':
string = item.media['oembed']['author_name']
elif source == 'media_title':
string = item.media['oembed']['title']
elif source == 'media_description':
string = item.media['oembed']['description']
elif source == 'media_author_url':
string = item.media['oembed']['author_url']
except KeyError:
string = ''
else:
string = getattr(item, source, '')
if not string:
string = ''
string = html_parser.unescape(string)
match = re.search(self.match_patterns[subject],
string,
self.match_flags[subject])
if match:
break
if bool(match) != self.match_success[subject]:
return False
# check user conditions
if not self.check_user_conditions(item):
return False
# matched, perform any actions
# don't approve shadowbanned users' posts except in special cases
if (self.action != 'approve' or
not self.check_shadowbanned or
not user_is_shadowbanned(item.author) or
approve_shadowbanned):
self.execute_actions(item, match)
return True
def check_user_conditions(self, item):
"""Checks an item's author against the defined requirements."""
# if no user conditions are set, no need to check at all
if not self.user_conditions:
return True
must_satisfy = self.user_conditions.get('must_satisfy', 'all')
user = item.author
for attr, compare in self.user_conditions.iteritems():
if attr == 'must_satisfy':
continue
# extract the comparison operator
operator = '='
if not isinstance(compare, bool):
operator_regex = '^(==?|<|>)'
match = re.search(operator_regex, compare)
if match:
operator = match.group(1)
compare = compare[len(operator):].strip()
if operator == '==':
operator = '='
# convert rank to a numerical value
if attr == 'rank':
rank_values = {'user': 0, 'contributor': 1, 'moderator': 2}
compare = rank_values[compare]
if user:
if attr == 'rank':
value = rank_values[get_user_rank(user, item.subreddit)]
elif attr == 'account_age':
user_date = datetime.utcfromtimestamp(user.created_utc)
value = (datetime.utcnow() - user_date).days
elif attr == 'combined_karma':
value = user.link_karma + user.comment_karma
else:
try:
value = getattr(user, attr, 0)
except HTTPError as e:
if e.response.status_code == 404:
# user is shadowbanned, never satisfies conditions
return False
else:
raise
else:
value = 0
if operator == '<':
result = int(value) < int(compare)
elif operator == '>':
result = int(value) > int(compare)
elif operator == '=':
result = int(value) == int(compare)
if result and must_satisfy == 'any':
return True
elif not result and must_satisfy == 'all':
return False
# if we reached this point, success depends on if this is any/all
if must_satisfy == 'any' and not result:
return False
return True
def execute_actions(self, item, match):
"""Performs the action(s) for the condition.
Also sends any comment/messages (if set) and creates a log entry.
"""
if self.action or self.comment or self.modmail or self.message:
log_actions = [self.action]
else:
log_actions = []
# perform the action
if self.action == 'remove':
item.remove(False)
elif self.action == 'spam':
item.remove(True)
elif self.action == 'approve':
item.approve()
elif self.action == 'report':
item.report()
# set thread options
if self.set_options and isinstance(item, praw.objects.Submission):
if 'nsfw' in self.set_options and not item.over_18:
item.mark_as_nsfw()
if 'contest' in self.set_options:
item.set_contest_mode(True)
if 'sticky' in self.set_options:
item.sticky()
# set flairs
if (isinstance(item, praw.objects.Submission) and
(self.link_flair_text or self.link_flair_class)):
text = replace_placeholders(self.link_flair_text, item, match)
css_class = replace_placeholders(self.link_flair_class, item, match)
item.set_flair(text, css_class.lower())
item.link_flair_text = text
item.link_flair_css_class = css_class.lower()
log_actions.append('link_flair')
if (self.user_flair_text or self.user_flair_class):
text = replace_placeholders(self.user_flair_text, item, match)
css_class = replace_placeholders(self.user_flair_class, item, match)
item.subreddit.set_flair(item.author, text, css_class.lower())
item.author_flair_text = text
item.author_flair_css_class = css_class.lower()
log_actions.append('user_flair')
if self.comment:
comment = self.build_message(self.comment, item, match,
disclaimer=True)
if isinstance(item, praw.objects.Submission):
response = item.add_comment(comment)
elif isinstance(item, praw.objects.Comment):
response = item.reply(comment)
response.distinguish()
if self.modmail:
message = self.build_message(self.modmail, item, match,
permalink=True)
subject = replace_placeholders(self.modmail_subject, item, match)
subject = subject[:100]
r.send_message('/r/'+item.subreddit.display_name, subject, message)
if self.message and item.author:
message = self.build_message(self.message, item, match,
disclaimer=True, permalink=True)
subject = replace_placeholders(self.message_subject, item, match)
subject = subject[:100]
r.send_message(item.author.name, subject, message)
log_entry = Log()
log_entry.item_fullname = item.name
log_entry.condition_yaml = self.yaml
log_entry.datetime = datetime.utcnow()
for entry in log_actions:
log_entry.action = entry
session.add(log_entry)
session.commit()
item_time = datetime.utcfromtimestamp(item.created_utc)
logging.info('Matched {0}, actions: {1} (age: {2})'
.format(get_permalink(item).encode('ascii', 'ignore'),
log_actions,
datetime.utcnow() - item_time))
def build_message(self, text, item, match,
disclaimer=False, permalink=False):
"""Builds a message/comment for the bot to post or send."""
message = text
if disclaimer:
message = message+'\n\n'+cfg_file.get('reddit', 'disclaimer')
if permalink and '{{permalink}}' not in message:
message = '{{permalink}}\n\n'+message
message = replace_placeholders(message, item, match)
return message
def update_from_wiki(subreddit, requester):
"""Updates conditions from the subreddit's wiki."""
global r
username = cfg_file.get('reddit', 'username')
try:
page = subreddit.get_wiki_page(cfg_file.get('reddit', 'wiki_page_name'))
except Exception:
send_error_message(requester, subreddit.display_name,
'The wiki page could not be accessed. Please ensure the page '
'http://www.reddit.com/r/{0}/wiki/{1} exists and that {2} '
'has the "wiki" mod permission to be able to access it.'
.format(subreddit.display_name,
cfg_file.get('reddit', 'wiki_page_name'),
username))
return False
html_parser = HTMLParser.HTMLParser()
page_content = html_parser.unescape(page.content_md)
# check that all the conditions are valid yaml
condition_defs = yaml.safe_load_all(page_content)
condition_num = 1
try:
for cond_def in condition_defs:
condition_num += 1
except Exception as e:
indented = ''
for line in str(e).split('\n'):
indented += ' {0}\n'.format(line)
send_error_message(requester, subreddit.display_name,
'Error when reading conditions from wiki - '
'Syntax invalid in section #{0}:\n\n{1}'
.format(condition_num, indented))
return False
# reload and actually process the conditions
condition_defs = yaml.safe_load_all(page_content)
condition_num = 1
kept_sections = []
for cond_def in condition_defs:
# ignore any non-dict sections (can be used as comments, etc.)
if not isinstance(cond_def, dict):
continue
cond_def = lowercase_keys_recursively(cond_def)
try:
check_condition_valid(cond_def)
except ValueError as e:
send_error_message(requester, subreddit.display_name,
'Invalid condition in section #{0} - {1}'
.format(condition_num, e))
return False
# create a condition for final checks
condition = Condition(cond_def)
# test to make sure that the final regex(es) are valid
for pattern in condition.match_patterns.values():
try:
re.compile(pattern)
except Exception as e:
send_error_message(requester, subreddit.display_name,
'Generated an invalid regex from section #{0} - {1}'
.format(condition_num, e))
return False
condition_num += 1
kept_sections.append(cond_def)
# Update the subreddit, or add it if necessary
try:
db_subreddit = (session.query(Subreddit)
.filter(Subreddit.name == subreddit.display_name.lower())
.one())
except NoResultFound:
db_subreddit = Subreddit()
db_subreddit.name = subreddit.display_name.lower()
db_subreddit.last_submission = datetime.utcnow() - timedelta(days=1)
db_subreddit.last_spam = datetime.utcnow() - timedelta(days=1)
db_subreddit.last_comment = datetime.utcnow() - timedelta(days=1)
session.add(db_subreddit)
db_subreddit.conditions_yaml = page_content
session.commit()
r.send_message(requester,
'{0} conditions updated'.format(username),
"{0}'s conditions were successfully updated for /r/{1}"
.format(username, subreddit.display_name))
return True
def lowercase_keys_recursively(subject):
"""Recursively lowercases all keys in a dict."""
lowercased = dict()
for key, val in subject.iteritems():
if isinstance(val, dict):
val = lowercase_keys_recursively(val)
lowercased[key.lower()] = val
return lowercased
def check_condition_valid(cond):
"""Checks if a condition defined on a wiki page is valid."""
validate_values_not_empty(cond)
validate_type(cond, 'standard', basestring)
if 'standard' in cond:
if not Condition.get_standard_condition(cond['standard']):
raise ValueError('Invalid standard condition: `{0}`'
.format(cond['standard']))
cond.update(Condition.get_standard_condition(cond['standard']))
validate_type(cond, 'user_conditions', dict)
validate_keys(cond)
validate_type(cond, 'author_is_submitter', bool)
validate_type(cond, 'is_reply', bool)
validate_type(cond, 'ignore_blockquotes', bool)
validate_type(cond, 'moderators_exempt', bool)
validate_type(cond, 'reports', int)
validate_type(cond, 'priority', int)
validate_type(cond, 'body_min_length', int)
validate_type(cond, 'body_max_length', int)
validate_type(cond, 'comment', basestring)
validate_type(cond, 'modmail', basestring)
validate_type(cond, 'modmail_subject', basestring)
validate_type(cond, 'message', basestring)
validate_type(cond, 'message_subject', basestring)
validate_type(cond, 'set_options', (basestring, list))
validate_type(cond, 'overwrite_user_flair', bool)
validate_value_in(cond, 'action', ('approve', 'remove', 'spam', 'report'))
validate_value_in(cond, 'type', ('submission', 'comment', 'both'))
validate_modifiers(cond)
# validate set_options
if 'set_options' in cond:
set_options = cond['set_options']
if not isinstance(set_options, list):
set_options = set_options.split()
for option in set_options:
if option not in ('nsfw', 'contest', 'sticky'):
raise ValueError('Invalid set_options value: `{0}`'.format(option))
# validate user conditions
if 'user_conditions' in cond:
user_conds = cond['user_conditions']
operator_regex = '((==?|<|>) )?'
oper_int_regex = '^'+operator_regex+'-?\d+$'
oper_rank_regex = '^'+operator_regex+'(user|contributor|moderator)$'
validate_regex(user_conds, 'account_age', oper_int_regex)
validate_regex(user_conds, 'comment_karma', oper_int_regex)
validate_regex(user_conds, 'link_karma', oper_int_regex)
validate_regex(user_conds, 'combined_karma', oper_int_regex)
validate_type(user_conds, 'is_gold', bool)
validate_regex(user_conds, 'rank', oper_rank_regex)
validate_value_in(user_conds, 'must_satisfy', ('any', 'all'))
def validate_values_not_empty(check):
"""Checks (recursively) that no values in the dict are empty."""
for key, val in check.iteritems():
if isinstance(val, dict):
validate_values_not_empty(val)
elif (val is None or
(isinstance(val, (basestring, list)) and len(val) == 0)):
raise ValueError('`{0}` set to an empty value'.format(key))
def validate_keys(check):
"""Checks if all the keys in the condition are valid."""
# check top-level keys
valid_keys = set(Condition._match_targets +
Condition._defaults.keys() +
['standard', 'type'])
for key in check:
key = key.lstrip('~')
key = re.sub(r'#.+$', '', key)
if key in valid_keys:
continue
# multiple subjects
if ('+' in key and
all(t in Condition._match_targets
for t in key.split('+'))):
continue
raise ValueError('Invalid variable: `{0}`'.format(key))
# check user_conditions keys
if 'user_conditions' in check:
valid_keys = set(['account_age', 'combined_karma', 'comment_karma',
'is_gold', 'link_karma', 'must_satisfy', 'rank'])
for key in check['user_conditions']:
if key not in valid_keys:
raise ValueError('Invalid user_conditions variable: `{0}`'
.format(key))
# check modifiers keys
if 'modifiers' in check and isinstance(check['modifiers'], dict):
for key in check['modifiers']:
if key not in check.keys():
raise ValueError('Invalid modifiers variable: `{0}` - '
'Check for typos and ensure all modifiers '
'correspond to a defined match subject.'
.format(key))
def validate_modifiers(check):
"""Checks that all modifier definitions in the condition are valid."""
if 'modifiers' not in check:
return
match_types = Condition._match_modifiers.keys()
valid_modifiers = set(match_types + ['case-sensitive', 'inverse', 'regex'])
if isinstance(check['modifiers'], dict):
to_validate = check['modifiers'].values()
else:
to_validate = list((check['modifiers'],))
for mod_list in to_validate:
# convert to a list if it's a string
if not isinstance(mod_list, list):
mod_list = mod_list.split(' ')
# make sure all modifiers are valid choices
for mod in mod_list:
if mod not in valid_modifiers:
raise ValueError('Invalid modifier: `{0}`'.format(mod))
# check that they specified no more than one match type modifier
if sum(1 for mod in mod_list if mod in match_types) > 1:
raise ValueError('More than one match type modifier (`{0}`) '
'specified.'.format(', '.join(match_types)))
def validate_value_in(check, key, valid_vals):
"""Validates that a dict value is in a list of valid choices."""
if key not in check:
return
if check[key] not in valid_vals:
raise ValueError('Invalid {0}: {1}'.format(key, check[key]))
def validate_type(check, key, req_type):
"""Validates that a dict value is of the correct type."""
if key not in check:
return
if req_type == int:
try:
int(str(check[key]))
except ValueError:
raise ValueError('{0} must be an integer'.format(key))
else:
if not isinstance(check[key], req_type):
raise ValueError('{0} must be {1}'.format(key, req_type))
def validate_regex(check, key, pattern):
"""Validates that a dict value matches a regex."""
if key not in check:
return
if not re.match(pattern, check[key]):
raise ValueError('Invalid {0}: {1}'.format(key, check[key]))
def send_error_message(user, sr_name, error):
"""Sends an error message to the user if a wiki update failed."""
global r
r.send_message(user,
'Error updating from wiki in /r/{0}'.format(sr_name),
'### Error updating from [wiki configuration in /r/{0}]'
'(http://www.reddit.com/r/{0}/wiki/{1}):\n\n---\n\n'
'{2}\n\n---\n\n[View configuration documentation](https://'
'github.com/Deimos/AutoModerator/wiki/Wiki-Configuration)'
.format(sr_name,
cfg_file.get('reddit', 'wiki_page_name'),
error))
def process_messages():
"""Processes the bot's messages looking for invites/commands."""
global r
stop_time = int(cfg_file.get('reddit', 'last_message'))
owner_username = cfg_file.get('reddit', 'owner_username')
new_last_message = None
update_srs = set()
invite_srs = set()
sleep_after = False
logging.debug('Checking messages')
try:
for message in r.get_inbox():
if int(message.created_utc) <= stop_time:
break
if message.was_comment:
continue
if not new_last_message:
new_last_message = int(message.created_utc)
# if it's a subreddit invite
if (not message.author and
message.subject.startswith('invitation to moderate /r/')):
invite_srs.add(message.subreddit.display_name.lower())
elif message.body.strip().lower() == 'update':
# handle if they put in something like '/r/' in the subject
if '/' in message.subject:
sr_name = message.subject[message.subject.rindex('/')+1:]
else:
sr_name = message.subject
if (sr_name.lower(), message.author.name) in update_srs:
continue
try:
subreddit = r.get_subreddit(sr_name)
if (message.author.name == owner_username or
message.author in subreddit.get_moderators()):
update_srs.add((sr_name.lower(), message.author.name))
else:
send_error_message(message.author, sr_name,
'You do not moderate /r/{0}'.format(sr_name))
except HTTPError as e:
send_error_message(message.author, sr_name,
'Unable to access /r/{0}'.format(sr_name))
elif (message.subject.strip().lower() == 'sleep' and
message.author.name == owner_username):
sleep_after = True
# accept subreddit invites
for subreddit in invite_srs:
try:
# workaround for praw clearing mod sub list on accept
mod_subs = r.user._mod_subs
r.accept_moderator_invite(subreddit)
r.user._mod_subs = mod_subs
r.user._mod_subs[subreddit] = r.get_subreddit(subreddit)
logging.info('Accepted mod invite in /r/{0}'
.format(subreddit))
except praw.errors.InvalidInvite:
pass
# do requested updates from wiki pages
updated_srs = []
for subreddit, sender in update_srs:
if update_from_wiki(r.get_subreddit(subreddit),
r.get_redditor(sender)):
updated_srs.append(subreddit)
logging.info('Updated from wiki in /r/{0}'.format(subreddit))
else:
logging.info('Error updating from wiki in /r/{0}'
.format(subreddit))
if sleep_after:
logging.info('Sleeping for 10 seconds')
sleep(10)
logging.info('Sleep ended, resuming')
except Exception as e:
logging.error('ERROR: {0}'.format(e))
raise
finally:
# update cfg with new last_message value
if new_last_message:
cfg_file.set('reddit', 'last_message', str(new_last_message))
cfg_file.write(open(path_to_cfg, 'w'))
return updated_srs
def replace_placeholders(string, item, match):
"""Replaces placeholders in the string."""
if isinstance(item, praw.objects.Comment):
string = string.replace('{{body}}', item.body)
string = string.replace('{{kind}}', 'comment')
else:
string = string.replace('{{body}}', item.selftext)
string = string.replace('{{kind}}', 'submission')
string = string.replace('{{domain}}', getattr(item, 'domain', ''))
string = string.replace('{{permalink}}', get_permalink(item))
string = string.replace('{{subreddit}}', item.subreddit.display_name)
if isinstance(item, praw.objects.Comment):
string = string.replace('{{title}}', item.link_title)
else:
string = string.replace('{{title}}', item.title)
string = string.replace('{{url}}', getattr(item, 'url', ''))
if item.author:
string = string.replace('{{user}}', item.author.name)
else:
string = string.replace('{{user}}', '[deleted]')
if getattr(item, 'media', None):
oembed_mapping = {'{{media_user}}': 'author_name',
'{{media_title}}': 'title',
'{{media_description}}': 'description',
'{{media_author_url}}': 'author_url'}
for placeholder, source in oembed_mapping.iteritems():
if placeholder in string:
try:
string = string.replace(placeholder,
item.media['oembed'][source])
except KeyError:
pass
    # replace any {{match-##}} placeholders with the corresponding match groups
string = re.sub(r'\{\{match-(\d+)\}\}', r'\\\1', string)
if match:
string = match.expand(string)
return string
def check_items(queue, items, stop_time, sr_dict, cond_dict):
"""Checks the items generator for any matching conditions."""
item_count = 0
start_time = time()
last_updates = {}
logging.debug('Checking {0} queue'.format(queue))
bot_username = cfg_file.get('reddit', 'username')
for item in items:
# skip non-removed (reported) items when checking spam
if queue == 'spam' and not item.banned_by:
continue
# never check the bot's own comments
if (item.author and
item.author.name.lower() == bot_username.lower() and
isinstance(item, praw.objects.Comment)):
continue
item_time = datetime.utcfromtimestamp(item.created_utc)
if (item_time < stop_time and
(queue != 'submission' or not item.approved_by)):
break
sr_name = item.subreddit.display_name.lower()
subreddit = sr_dict[sr_name]
conditions = cond_dict[sr_name][queue]
if (queue != 'report' and
(queue != 'submission' or not item.approved_by) and
sr_name not in last_updates):
last_updates[sr_name] = item_time
# don't need to check for shadowbanned unless we're in spam
# and the subreddit doesn't exclude shadowbanned posts
if queue == 'spam' and not subreddit.exclude_banned_modqueue:
for condition in conditions:
condition.check_shadowbanned = True
else:
for condition in conditions:
condition.check_shadowbanned = False
item_count += 1
logging.debug('Checking item %s', get_permalink(item))
try:
# check removal conditions, stop checking if any matched
if check_conditions(subreddit, item,
[c for c in conditions
if c.action in ('remove', 'spam')],
stop_after_match=True):
continue
# check all other conditions
check_conditions(subreddit, item,
[c for c in conditions
if c.action not in ('remove', 'spam')])
except (praw.errors.ModeratorRequired,
praw.errors.ModeratorOrScopeRequired,
HTTPError) as e:
if not isinstance(e, HTTPError) or e.response.status_code == 403:
logging.error('Permissions error in /r/{0}'
.format(subreddit.name))
raise
except Exception as e:
logging.error('ERROR: {0}'.format(e))
# Update "last_" entries in db
for sr in last_updates:
setattr(sr_dict[sr], 'last_'+queue, last_updates[sr])
session.commit()
logging.debug('Checked {0} items in {1}'
.format(item_count, elapsed_since(start_time)))
def check_conditions(subreddit, item, conditions, stop_after_match=False):
"""Checks an item against a list of conditions.
Returns True if any conditions matched, False otherwise.
"""
bot_username = cfg_file.get('reddit', 'username')
if isinstance(item, praw.objects.Submission):
conditions = [c for c in conditions
if c.type in ('submission', 'both')]
elif isinstance(item, praw.objects.Comment):
conditions = [c for c in conditions
if c.type in ('comment', 'both')]
# get what's already been performed out of the log
performed_actions = set()
performed_yaml = set()
log_entries = (session.query(Log)
.filter(Log.item_fullname == item.name)
.all())
for entry in log_entries:
performed_actions.add(entry.action)
performed_yaml.add(entry.condition_yaml)
# sort the conditions by desc priority, and then by required requests
conditions.sort(key=lambda c: c.requests_required)
conditions.sort(key=lambda c: c.priority, reverse=True)
any_matched = False
for condition in conditions:
# don't check remove/spam/report conditions on posts made by mods
if (condition.moderators_exempt and
condition.action in ('remove', 'spam', 'report') and
item.author and
get_user_rank(item.author, item.subreddit) == 'moderator'):
continue
# never remove anything if it's been approved by another mod
if (condition.action in ('remove', 'spam') and
item.approved_by and
item.approved_by.name.lower() != bot_username.lower()):
continue
# don't bother checking condition if this action has already been done
if condition.action in performed_actions:
continue
# don't send repeat messages for the same item
if ((condition.comment or condition.modmail or condition.message) and
condition.yaml in performed_yaml):
continue
# don't overwrite existing flair
if ((condition.link_flair_text or condition.link_flair_class) and
isinstance(item, praw.objects.Submission) and
(item.link_flair_text or item.link_flair_css_class)):
continue
if ((condition.user_flair_text or condition.user_flair_class) and
(item.author_flair_text or item.author_flair_css_class) and
not condition.overwrite_user_flair):
continue
try:
start_time = time()
match = condition.check_item(item)
if match:
if condition.action:
performed_actions.add(condition.action)
performed_yaml.add(condition.yaml)
logging.debug('{0}\n Result {1} in {2}'
.format(condition.yaml,
match,
elapsed_since(start_time)))
except (praw.errors.ModeratorRequired,
praw.errors.ModeratorOrScopeRequired,
HTTPError) as e:
raise
except Exception as e:
logging.error('ERROR: {0}\n{1}'.format(e, condition.yaml))
match = False
any_matched = (any_matched or match)
if stop_after_match and any_matched:
break
return any_matched
def filter_conditions(conditions, queue):
"""Filters a list of conditions based on the queue's needs."""
if queue == 'spam':
return [c for c in conditions
if c.reports < 1 and
c.action != 'report']
elif queue == 'report':
return [c for c in conditions
if c.action != 'report' and
(c.action != 'approve' or c.reports > 0)]
elif queue == 'submission':
return [c for c in conditions
if c.type in ('both', 'submission') and
c.reports < 1 and
c.action != 'approve']
elif queue == 'comment':
return [c for c in conditions
if c.type in ('both', 'comment') and
c.reports < 1 and
c.action != 'approve']
def get_user_rank(user, subreddit):
"""Returns the user's rank in the subreddit."""
sr_name = subreddit.display_name.lower()
# fetch mod/contrib lists if necessary
cached = False
if sr_name in get_user_rank.moderator_cache:
cache_age = datetime.utcnow() - get_user_rank.cache_time[sr_name]
if cache_age < timedelta(hours=1):
cached = True
if not cached:
get_user_rank.cache_time[sr_name] = datetime.utcnow()
mod_list = set()
for mod in subreddit.get_moderators():
mod_list.add(mod.name)
get_user_rank.moderator_cache[sr_name] = mod_list
contrib_list = set()
try:
for contrib in subreddit.get_contributors():
contrib_list.add(contrib.name)
except HTTPError as e:
if e.response.status_code != 404:
raise
get_user_rank.contributor_cache[sr_name] = contrib_list
if user.name in get_user_rank.moderator_cache[sr_name]:
return 'moderator'
elif user.name in get_user_rank.contributor_cache[sr_name]:
return 'contributor'
else:
return 'user'
get_user_rank.moderator_cache = {}
get_user_rank.contributor_cache = {}
get_user_rank.cache_time = {}
def user_is_shadowbanned(user):
"""Returns True if the user is shadowbanned."""
global r
try: # try to get user overview
list(user.get_overview(limit=1))
except HTTPError as e:
# if that failed, they're probably shadowbanned
if e.response.status_code == 404:
return True
else:
raise
return False
def get_permalink(item):
"""Returns the permalink for the item."""
if isinstance(item, praw.objects.Submission):
return item.permalink
elif isinstance(item, praw.objects.Comment):
permalink = ('http://www.reddit.com/r/{0}/comments/{1}/-/{2}'
.format(item.subreddit.display_name,
item.link_id.split('_')[1],
item.id))
if is_reply(item):
permalink += '?context=5'
return permalink
def is_reply(item):
"""Returns True if the item is a reply (not a top-level comment)."""
if not isinstance(item, praw.objects.Comment):
return False
if item.parent_id.startswith('t1_'):
return True
return False
def elapsed_since(start_time):
"""Returns a timedelta for how much time has passed since start_time."""
elapsed = time() - start_time
return timedelta(seconds=elapsed)
def build_multireddit_groups(subreddits):
"""Splits a subreddit list into groups if necessary (due to url length)."""
multireddits = []
current_multi = []
current_len = 0
for sub in subreddits:
if current_len > 4700:
multireddits.append(current_multi)
current_multi = []
current_len = 0
current_multi.append(sub)
current_len += len(sub) + 1
multireddits.append(current_multi)
return multireddits
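# Hedged usage sketch (not part of the original bot): shows how
# build_multireddit_groups splits a long subreddit list so each '+'-joined
# multireddit name stays near the ~4700-character budget checked above.
# The subreddit names here are hypothetical.
def _demo_multireddit_groups():
    subs = ['example_sub_{0}'.format(i) for i in range(600)]
    for group in build_multireddit_groups(subs):
        print(len(group), 'subreddits,', len('+'.join(group)), 'characters')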
def check_queues(queue_funcs, sr_dict, cond_dict):
"""Checks all the queues for new items to process."""
global r
for queue in queue_funcs:
subreddits = [s for s in sr_dict
if s in cond_dict and len(cond_dict[s][queue]) > 0]
if len(subreddits) == 0:
continue
multireddits = build_multireddit_groups(subreddits)
# fetch and process the items for each multireddit
for multi in multireddits:
if queue == 'report':
limit = cfg_file.get('reddit', 'report_backlog_limit_hours')
stop_time = datetime.utcnow() - timedelta(hours=int(limit))
else:
stop_time = max(getattr(sr, 'last_'+queue)
for sr in sr_dict.values()
if sr.name in multi)
queue_subreddit = r.get_subreddit('+'.join(multi))
if queue_subreddit:
queue_func = getattr(queue_subreddit, queue_funcs[queue])
items = queue_func(limit=None)
check_items(queue, items, stop_time, sr_dict, cond_dict)
def update_conditions_for_sr(cond_dict, queues, subreddit):
cond_dict[subreddit.name] = {}
conditions = [Condition(d)
for d in yaml.safe_load_all(subreddit.conditions_yaml)
if isinstance(d, dict)]
for queue in queues:
cond_dict[subreddit.name][queue] = filter_conditions(conditions, queue)
def load_all_conditions(sr_dict, queues):
cond_dict = {}
for sr in sr_dict.values():
update_conditions_for_sr(cond_dict, queues, sr)
return cond_dict
def get_enabled_subreddits(reload_mod_subs=True):
global r
subreddits = (session.query(Subreddit)
.filter(Subreddit.enabled == True)
.all())
if reload_mod_subs:
r.user._mod_subs = None
logging.info('Getting list of moderated subreddits')
modded_subs = None
while not modded_subs:
try:
modded_subs = r.user.get_cached_moderated_reddits().keys()
            except Exception:  # keep retrying if the listing fetch fails
modded_subs = None
else:
modded_subs = r.user._mod_subs.keys()
# get rid of any subreddits the bot doesn't moderate
sr_dict = {sr.name.lower(): sr
for sr in subreddits
if sr.name.lower() in modded_subs}
return sr_dict
def main():
global r
logging.config.fileConfig(path_to_cfg)
re.set_fallback_notification(re.FALLBACK_EXCEPTION)
# which queues to check and the function to call
queue_funcs = {'report': 'get_reports',
'spam': 'get_mod_queue',
'submission': 'get_new',
'comment': 'get_comments'}
while True:
try:
r = praw.Reddit(user_agent=cfg_file.get('reddit', 'user_agent'))
logging.info('Logging in as {0}'
.format(cfg_file.get('reddit', 'username')))
r.login(cfg_file.get('reddit', 'username'),
cfg_file.get('reddit', 'password'))
sr_dict = get_enabled_subreddits()
Condition.update_standards()
cond_dict = load_all_conditions(sr_dict, queue_funcs.keys())
break
except Exception as e:
logging.error('ERROR: {0}'.format(e))
reports_mins = int(cfg_file.get('reddit', 'reports_check_period_mins'))
reports_check_period = timedelta(minutes=reports_mins)
last_reports_check = time()
while True:
try:
sr_dict = get_enabled_subreddits(reload_mod_subs=False)
# if the standard conditions have changed, reinit all conditions
if Condition.update_standards():
logging.info('Updating standard conditions from database')
cond_dict = load_all_conditions(sr_dict, queue_funcs.keys())
# check reports if past checking period
if elapsed_since(last_reports_check) > reports_check_period:
last_reports_check = time()
check_queues({'report': queue_funcs['report']},
sr_dict, cond_dict)
check_queues({q: queue_funcs[q]
for q in queue_funcs
if q != 'report'},
sr_dict, cond_dict)
updated_srs = process_messages()
if updated_srs:
if any(sr not in sr_dict for sr in updated_srs):
sr_dict = get_enabled_subreddits(reload_mod_subs=True)
else:
sr_dict = get_enabled_subreddits(reload_mod_subs=False)
for sr in updated_srs:
update_conditions_for_sr(cond_dict,
queue_funcs.keys(),
sr_dict[sr])
except (praw.errors.ModeratorRequired,
praw.errors.ModeratorOrScopeRequired,
HTTPError) as e:
if not isinstance(e, HTTPError) or e.response.status_code == 403:
logging.info('Re-initializing due to {0}'.format(e))
sr_dict = get_enabled_subreddits()
except KeyboardInterrupt:
raise
except Exception as e:
logging.error('ERROR: {0}'.format(e))
session.rollback()
if __name__ == '__main__':
main()
|
import sys
import src.game as game
import ai.ai as ai
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == 'ai':
        ai.run()
    else:
        game.run()
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
# Create your models here.
class Movie(models.Model):
title = models.CharField(max_length=200)
genre = models.CharField(max_length=100)
movie_logo = models.FileField()
def __str__(self):
return self.title
class Movie3(models.Model):
    adult = models.BooleanField()
    belongs_to_collection = models.TextField()
    budget = models.FloatField()
    genres = models.TextField()
    homepage = models.CharField(max_length=400)
    # imp = models.IntegerField()
    imdb_id = models.CharField(max_length=200)
    original_language = models.CharField(max_length=200)
    original_title = models.CharField(max_length=200)
    overview = models.TextField()
    popularity = models.FloatField()
    poster_path = models.CharField(max_length=200)
    production_companies = models.TextField()
    production_countries = models.TextField()
    release_date = models.CharField(max_length=200)
    revenue = models.FloatField()
    runtime = models.FloatField()
    spoken_languages = models.TextField()
    status = models.CharField(max_length=200)
    tagline = models.CharField(max_length=400)
    title = models.CharField(max_length=200)
    video = models.TextField()
    vote_average = models.FloatField()
    vote_count = models.IntegerField()
    imdb_url = models.CharField(max_length=200)
    year = models.IntegerField()
    img_url = models.TextField()
def __str__(self):
return self.title
class Movie4(models.Model):
    adult = models.BooleanField()
    belongs_to_collection = models.TextField()
    budget = models.FloatField()
    genres = models.TextField()
    homepage = models.CharField(max_length=400)
    imp = models.IntegerField()
    imdb_id = models.CharField(max_length=200)
    original_language = models.CharField(max_length=200)
    original_title = models.CharField(max_length=200)
    overview = models.TextField()
    popularity = models.FloatField()
    poster_path = models.CharField(max_length=200)
    production_companies = models.TextField()
    production_countries = models.TextField()
    release_date = models.CharField(max_length=200)
    revenue = models.FloatField()
    runtime = models.FloatField()
    spoken_languages = models.TextField()
    status = models.CharField(max_length=200)
    tagline = models.CharField(max_length=400)
    title = models.CharField(max_length=200)
    video = models.TextField()
    vote_average = models.FloatField()
    vote_count = models.IntegerField()
    imdb_url = models.CharField(max_length=200)
    year = models.IntegerField()
    img_url = models.TextField()
def __str__(self):
return self.title
class Myrating(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
movie = models.ForeignKey(Movie4, on_delete=models.CASCADE)
rating = models.IntegerField(default=0, validators=[MaxValueValidator(10), MinValueValidator(0)])
class Meta:
unique_together = (("user", "movie"),)
index_together = (("user", "movie"),)
def __str__(self):
return str(self.user)+"_"+str(self.movie)+"_"+str(self.rating)
class MyList(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
movie = models.ForeignKey(Movie4, on_delete=models.CASCADE)
watch = models.BooleanField(default=False)
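# Hedged usage sketch (not part of the original models module): because
# Myrating declares unique_together on (user, movie), re-rating a movie should
# go through update_or_create rather than a second create(), which would raise
# an IntegrityError. The user/movie variables below are assumed to exist.
#
#     rating, created = Myrating.objects.update_or_create(
#         user=request_user, movie=selected_movie, defaults={'rating': 8})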
|
import requests
from websocket import create_connection
import json
def get_ws_connection(url):
ws = create_connection(url)
return ws
def get_transaction_details(ws, query):
# ws = create_connection(url)
ws.send(query)
result = ws.recv()
# ws.close()
return result
def get_gas_price(result):
result = json.loads(result)
try:
result = result['result']
gas_price = result["gasPrice"]
gas_price = int(gas_price,16)/1000000000
return gas_price
except Exception as e:
print(e)
return None
def get_gas_used(result):
result = json.loads(result)
try:
result = result['result']
gas_used = result['gasUsed']
gas_used = int(gas_used,16)
return gas_used
except Exception as e:
print(e)
return None
def get_gas(result):
result = json.loads(result)
try:
result = result['result']
gas = result['gas']
gas = int(gas,16)
return gas
except Exception as e:
print(e)
return None
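# Hedged usage sketch (not part of the original module): ties the helpers
# together against a JSON-RPC websocket endpoint. The endpoint URL and the
# transaction hash are placeholders; eth_getTransactionByHash and
# eth_getTransactionReceipt are standard Ethereum JSON-RPC methods.
def _demo_gas_lookup(ws_url, tx_hash):
    ws = get_ws_connection(ws_url)
    tx_query = json.dumps({"jsonrpc": "2.0", "id": 1,
                           "method": "eth_getTransactionByHash",
                           "params": [tx_hash]})
    receipt_query = json.dumps({"jsonrpc": "2.0", "id": 2,
                                "method": "eth_getTransactionReceipt",
                                "params": [tx_hash]})
    # gasPrice and gas come from the transaction; gasUsed comes from the receipt
    tx_result = get_transaction_details(ws, tx_query)
    receipt_result = get_transaction_details(ws, receipt_query)
    print("gas price (gwei):", get_gas_price(tx_result))
    print("gas limit:", get_gas(tx_result))
    print("gas used:", get_gas_used(receipt_result))
    ws.close()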
|
import random
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
class QNet(nn.Module):
def __init__(self, n_features=4, n_actions=2, device=torch.device("cpu")):
super(QNet, self).__init__()
self.n_features = n_features
self.n_actions = n_actions
self.fc1 = nn.Linear(n_features, 128) # fully connected
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, n_actions)
self.version = 0
self.device = device
def forward(self, x):
if isinstance(x, np.ndarray):
x = torch.tensor(x, dtype=torch.float32, device=self.device)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_action(self, obs, epsilon=0.1):
        # random.random(): returns a random float in the range [0.0, 1.0)
if random.random() < epsilon:
action = random.randrange(0, self.n_actions)
else:
out = self.forward(obs)
action = torch.argmax(out, dim=-1)
action = action.item()
        return action  # argmax: index of the largest Q-value
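# Hedged usage sketch (not part of the original module): runs an untrained
# QNet on a random CartPole-sized observation and picks epsilon-greedy actions.
# Shapes follow the defaults above (4 features, 2 actions).
if __name__ == "__main__":
    net = QNet(n_features=4, n_actions=2)
    obs = np.random.randn(4).astype(np.float32)
    greedy_action = net.get_action(obs, epsilon=0.0)    # always the argmax action
    random_action = net.get_action(obs, epsilon=1.0)    # always an exploratory action
    print(greedy_action, random_action)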
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 fabian <fabian@Agrajag>
#
# Distributed under terms of the MIT license.
"""
"""
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
import sys
package = "com.samsung.android.email.provider"
device = MonkeyRunner.waitForConnection()
print("Uninstalling email")
device.removePackage(package)
print("Installing email")
installed = device.installPackage("/mnt/SharedFolder/Samsung Email_v6.1.12.1_apkpure.com.apk")
print("Install done. Result: " + str(installed))
device.shell("am force-stop com.samsung.android.email.provider")
#print(device.startActivity(component="com.google.android.gm.lite/com.google.android.gm.ConversationListActivityGmail"))
activity = "com.samsung.android.email.ui.settings.setup.login.LoginActivity"
runComponent = package + "/" + activity
device.startActivity(component=runComponent)
device.type("samsungsetup@example.org")
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.type("samsung")
print("Selecting manual setup")
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
#MonkeyRunner.sleep(2)
# Select IMAP
print("Selecting IMAP")
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
print("Waiting for next activity ...")
MonkeyRunner.sleep(3.0)
# Go to IMAP server
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
print("Setting IMAP server")
# Clear field
for i in range(20):
device.press("KEYCODE_DEL", device.DOWN_AND_UP)
device.type(sys.argv[1])
#GO to security setting
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
print("Setting SMTP server")
# Enter SMTP
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
# Clear field
for i in range(20):
device.press("KEYCODE_DEL", device.DOWN_AND_UP)
device.type(sys.argv[1])
#GO to security setting
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
# Go to sign in
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_TAB", MonkeyDevice.DOWN_AND_UP)
device.press("KEYCODE_ENTER", MonkeyDevice.DOWN_AND_UP)
print("Waiting for completed sign in")
MonkeyRunner.sleep(15.0)
device.shell("am force-stop com.samsung.android.email.provider")
activity = "com.samsung.android.email.composer.activity.MessageCompose"
runComponent = package + "/" + activity
device.startActivity(component=runComponent)
# Allow permissions
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_ENTER", device.DOWN_AND_UP)
device.press("KEYCODE_ENTER", device.DOWN_AND_UP)
# To
device.type("test@example.org")
# Subject
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.type("SENT")
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_ENTER", device.DOWN_AND_UP)
MonkeyRunner.sleep(10.0)
device.shell("am force-stop com.samsung.android.email.provider")
activity = "com.samsung.android.email.composer.activity.MessageCompose"
runComponent = package + "/" + activity
device.startActivity(component=runComponent)
# To
device.type("test@example.org")
# Subject
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.type("SENT")
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_ENTER", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_TAB", device.DOWN_AND_UP)
device.press("KEYCODE_ENTER", device.DOWN_AND_UP)
|
# Copyright 2019 Semaphore Solutions, Inc.
# ---------------------------------------------------------------------------
__version__ = '1.0.1'
|
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
values = [2, 3, 4, 5, 6, 7, 8]
def multiplyByTwo(n):
print(f'Processing {n} in {threading.current_thread()}')
return 2 * n
def main():
print('Starting ThreadPoolExecutor')
with ThreadPoolExecutor(max_workers=3) as executor:
results = executor.map(multiplyByTwo, values)
print([x for x in results])
print('All tasks complete')
if __name__ == '__main__':
main()
results = list(map(multiplyByTwo, values))
print(results)
'''
Starting ThreadPoolExecutor
Processing 2 in <Thread(ThreadPoolExecutor-0_0, started 123145572212736)>
Processing 3 in <Thread(ThreadPoolExecutor-0_0, started 123145572212736)>
Processing 4 in <Thread(ThreadPoolExecutor-0_2, started 123145605791744)>
Processing 5 in <Thread(ThreadPoolExecutor-0_0, started 123145572212736)>
Processing 7 in <Thread(ThreadPoolExecutor-0_2, started 123145605791744)>
Processing 6 in <Thread(ThreadPoolExecutor-0_1, started 123145589002240)>
Processing 8 in <Thread(ThreadPoolExecutor-0_0, started 123145572212736)>
[4, 6, 8, 10, 12, 14, 16]
All tasks complete
Processing 2 in <_MainThread(MainThread, started 4421772800)>
Processing 3 in <_MainThread(MainThread, started 4421772800)>
Processing 4 in <_MainThread(MainThread, started 4421772800)>
Processing 5 in <_MainThread(MainThread, started 4421772800)>
Processing 6 in <_MainThread(MainThread, started 4421772800)>
Processing 7 in <_MainThread(MainThread, started 4421772800)>
Processing 8 in <_MainThread(MainThread, started 4421772800)>
[4, 6, 8, 10, 12, 14, 16]
'''
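# Hedged variant (not part of the original script): as_completed is imported
# above but unused by main(); this sketch shows the submit()-based alternative,
# where results are printed in completion order rather than input order.
def main_as_completed():
    with ThreadPoolExecutor(max_workers=3) as executor:
        futures = [executor.submit(multiplyByTwo, v) for v in values]
        for future in as_completed(futures):
            print(future.result())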
|